"""
Refresh any files in ../virtualenv_support/ that come from elsewhere
"""
import os
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
import sys
here = os.path.dirname(__file__)
support_location = os.path.join(here, '..', 'virtualenv_support')
embedded_location = os.path.join(here, '..', 'virtualenv_embedded')
embedded_files = [
('http://peak.telecommunity.com/dist/ez_setup.py', 'ez_setup.py'),
('http://python-distribute.org/distribute_setup.py', 'distribute_setup.py'),
]
support_files = [
('http://pypi.python.org/packages/2.6/s/setuptools/setuptools-0.6c11-py2.6.egg', 'setuptools-0.6c11-py2.6.egg'),
('http://pypi.python.org/packages/2.5/s/setuptools/setuptools-0.6c11-py2.5.egg', 'setuptools-0.6c11-py2.5.egg'),
('http://pypi.python.org/packages/2.4/s/setuptools/setuptools-0.6c11-py2.4.egg', 'setuptools-0.6c11-py2.4.egg'),
('http://pypi.python.org/packages/source/d/distribute/distribute-0.6.24.tar.gz', 'distribute-0.6.24.tar.gz'),
('http://pypi.python.org/packages/source/p/pip/pip-1.1.tar.gz', 'pip-1.1.tar.gz'),
]
def refresh_files(files, location):
for url, filename in files:
sys.stdout.write('fetching %s ... ' % url)
sys.stdout.flush()
f = urlopen(url)
content = f.read()
f.close()
print('done.')
filename = os.path.join(location, filename)
if os.path.exists(filename):
f = open(filename, 'rb')
cur_content = f.read()
f.close()
else:
cur_content = ''
if cur_content == content:
print(' %s up-to-date' % filename)
else:
print(' overwriting %s' % filename)
f = open(filename, 'wb')
f.write(content)
f.close()
def main():
refresh_files(embedded_files, embedded_location)
refresh_files(support_files, support_location)
if __name__ == '__main__':
main()
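# A usage sketch: according to this file's metadata the script lives at
# bin/refresh-support-files.py, so from a repository checkout one would run
#
#     python bin/refresh-support-files.py
#
# which re-downloads every listed file and overwrites it only when its
# content has changed.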
| {
"content_hash": "ba0075d9940f0003dd8cb5507eaca5b2",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 116,
"avg_line_length": 32.295081967213115,
"alnum_prop": 0.6131979695431472,
"repo_name": "msabramo/virtualenv",
"id": "66274b4424963e88a90f509a8a618f792a7bfa2f",
"size": "1992",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "bin/refresh-support-files.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9205"
},
{
"name": "JavaScript",
"bytes": "19221"
},
{
"name": "PowerShell",
"bytes": "8252"
},
{
"name": "Python",
"bytes": "172878"
},
{
"name": "Shell",
"bytes": "14075"
}
],
"symlink_target": ""
} |
from selenium.webdriver.common.keys import Keys
from tests.integration.tests.test_fileinput import Test as TestFileInput
from . import VisualTest
class Test(VisualTest):
urls = TestFileInput.urls
def test_test_default_usecase(self):
self.driver.get('%s%s' % (self.live_server_url, TestFileInput.test_default_usecase.url))
self.assertScreenshot('form', 'fileinput_default_usecase', threshold=1)
def test_invalid_value(self):
self.driver.get('%s%s' % (self.live_server_url, TestFileInput.test_invalid_value.url))
self.driver.find_element_by_css_selector("button").send_keys(Keys.RETURN)
self.assertScreenshot('form', 'fileinput_missing_value_error', threshold=1)
def test_part_group_class(self):
self.driver.get('%s%s' % (self.live_server_url, TestFileInput.test_part_group_class.url))
self.assertScreenshot('form', 'fileinput_part_group_class', threshold=1)
def test_part_add_group_class(self):
self.driver.get('%s%s' % (self.live_server_url, TestFileInput.test_part_add_group_class.url))
self.assertScreenshot('form', 'fileinput_part_add_group_class', threshold=1)
def test_part_prefix(self):
self.driver.get('%s%s' % (self.live_server_url, TestFileInput.test_part_prefix.url))
self.assertScreenshot('form', 'fileinput_part_prefix', threshold=1)
def test_part_add_control_class(self):
self.driver.get('%s%s' % (self.live_server_url, TestFileInput.test_part_add_control_class.url))
self.driver.find_element_by_css_selector("#id_test_field_container label").click()
self.assertScreenshot('form', 'fileinput_part_add_control_class', threshold=1)
def test_part_label(self):
self.driver.get('%s%s' % (self.live_server_url, TestFileInput.test_part_label.url))
self.assertScreenshot('form', 'fileinput_part_label', threshold=1)
def test_part_add_label_class(self):
self.driver.get('%s%s' % (self.live_server_url, TestFileInput.test_part_add_label_class.url))
self.assertScreenshot('form', 'fileinput_part_add_label_class', threshold=1)
def test_part_help_text(self):
self.driver.get('%s%s' % (self.live_server_url, TestFileInput.test_part_help_text.url))
self.assertScreenshot('form', 'fileinput_part_help_text', threshold=1)
def test_part_errors(self):
self.driver.get('%s%s' % (self.live_server_url, TestFileInput.test_part_errors.url))
self.assertScreenshot('form', 'fileinput_part_errors', threshold=1)
| {
"content_hash": "dbe4c5e06f1c3ef593c03afb55d30434",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 103,
"avg_line_length": 50.66,
"alnum_prop": 0.6936439005132254,
"repo_name": "sourabhdattawad/django-material",
"id": "77a1e66058030b3eedccca4f6bcd422a0647c30c",
"size": "2533",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tests/visual/tests/test_fileinput.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "218692"
},
{
"name": "HTML",
"bytes": "111577"
},
{
"name": "JavaScript",
"bytes": "361169"
},
{
"name": "Python",
"bytes": "173076"
}
],
"symlink_target": ""
} |
"""
Main Random Variables Module
Defines abstract random variable type.
Contains interfaces for probability space object (PSpace) as well as standard
operators, P, E, sample, density, where
See Also
========
sympy.stats.crv
sympy.stats.frv
sympy.stats.rv_interface
"""
from __future__ import print_function, division
from sympy import (Basic, S, Expr, Symbol, Tuple, And, Add, Eq, lambdify,
Equality, Lambda, DiracDelta, sympify)
from sympy.core.relational import Relational
from sympy.logic.boolalg import Boolean
from sympy.solvers.solveset import solveset
from sympy.sets.sets import FiniteSet, ProductSet, Intersection
from sympy.abc import x
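# A quick usage sketch of the operators defined in this module (the values
# below are taken from the doctests further down in this file):
#
#     >>> from sympy.stats import Die, E, P, density, sample
#     >>> X = Die('X', 6)
#     >>> E(X)                 # expectation
#     7/2
#     >>> P(X > 3)             # probability of a condition
#     1/2
#     >>> density(X).dict      # density; a dict for discrete random variables
#     {1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}
#     >>> roll = sample(X)     # a single random realization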
class RandomDomain(Basic):
"""
Represents a set of variables and the values which they can take
See Also
========
sympy.stats.crv.ContinuousDomain
sympy.stats.frv.FiniteDomain
"""
is_ProductDomain = False
is_Finite = False
is_Continuous = False
def __new__(cls, symbols, *args):
symbols = FiniteSet(*symbols)
return Basic.__new__(cls, symbols, *args)
@property
def symbols(self):
return self.args[0]
@property
def set(self):
return self.args[1]
def __contains__(self, other):
raise NotImplementedError()
def integrate(self, expr):
raise NotImplementedError()
class SingleDomain(RandomDomain):
"""
A single variable and its domain
See Also
========
sympy.stats.crv.SingleContinuousDomain
sympy.stats.frv.SingleFiniteDomain
"""
def __new__(cls, symbol, set):
assert symbol.is_Symbol
return Basic.__new__(cls, symbol, set)
@property
def symbol(self):
return self.args[0]
@property
def symbols(self):
return FiniteSet(self.symbol)
def __contains__(self, other):
if len(other) != 1:
return False
sym, val = tuple(other)[0]
return self.symbol == sym and val in self.set
class ConditionalDomain(RandomDomain):
"""
A RandomDomain with an attached condition
See Also
========
sympy.stats.crv.ConditionalContinuousDomain
sympy.stats.frv.ConditionalFiniteDomain
"""
def __new__(cls, fulldomain, condition):
condition = condition.xreplace(dict((rs, rs.symbol)
for rs in random_symbols(condition)))
return Basic.__new__(cls, fulldomain, condition)
@property
def symbols(self):
return self.fulldomain.symbols
@property
def fulldomain(self):
return self.args[0]
@property
def condition(self):
return self.args[1]
@property
def set(self):
raise NotImplementedError("Set of Conditional Domain not Implemented")
def as_boolean(self):
return And(self.fulldomain.as_boolean(), self.condition)
class PSpace(Basic):
"""
A Probability Space
    Probability Spaces encode processes that take on different values
    probabilistically. These underlie Random Symbols which occur in SymPy
expressions and contain the mechanics to evaluate statistical statements.
See Also
========
sympy.stats.crv.ContinuousPSpace
sympy.stats.frv.FinitePSpace
"""
is_Finite = None
is_Continuous = None
is_real = None
@property
def domain(self):
return self.args[0]
@property
def density(self):
return self.args[1]
@property
def values(self):
return frozenset(RandomSymbol(sym, self) for sym in self.domain.symbols)
@property
def symbols(self):
return self.domain.symbols
def where(self, condition):
raise NotImplementedError()
def compute_density(self, expr):
raise NotImplementedError()
def sample(self):
raise NotImplementedError()
def probability(self, condition):
raise NotImplementedError()
def integrate(self, expr):
raise NotImplementedError()
class SinglePSpace(PSpace):
"""
Represents the probabilities of a set of random events that can be
attributed to a single variable/symbol.
"""
def __new__(cls, s, distribution):
if isinstance(s, str):
s = Symbol(s)
if not isinstance(s, Symbol):
            raise TypeError("s should be a string or Symbol")
return Basic.__new__(cls, s, distribution)
@property
def value(self):
return RandomSymbol(self.symbol, self)
@property
def symbol(self):
return self.args[0]
@property
def distribution(self):
return self.args[1]
@property
def pdf(self):
return self.distribution.pdf(self.symbol)
class RandomSymbol(Expr):
"""
Random Symbols represent ProbabilitySpaces in SymPy Expressions
In principle they can take on any value that their symbol can take on
within the associated PSpace with probability determined by the PSpace
Density.
Random Symbols contain pspace and symbol properties.
The pspace property points to the represented Probability Space
The symbol is a standard SymPy Symbol that is used in that probability space
for example in defining a density.
You can form normal SymPy expressions using RandomSymbols and operate on
those expressions with the Functions
E - Expectation of a random expression
P - Probability of a condition
density - Probability Density of an expression
given - A new random expression (with new random symbols) given a condition
An object of the RandomSymbol type should almost never be created by the
user. They tend to be created instead by the PSpace class's value method.
Traditionally a user doesn't even do this but instead calls one of the
convenience functions Normal, Exponential, Coin, Die, FiniteRV, etc....
"""
def __new__(cls, symbol, pspace=None):
if pspace is None:
# Allow single arg, representing pspace == PSpace()
pspace = PSpace()
if not isinstance(symbol, Symbol):
raise TypeError("symbol should be of type Symbol")
if not isinstance(pspace, PSpace):
raise TypeError("pspace variable should be of type PSpace")
return Basic.__new__(cls, symbol, pspace)
is_finite = True
is_Symbol = True
is_Atom = True
_diff_wrt = True
pspace = property(lambda self: self.args[1])
symbol = property(lambda self: self.args[0])
name = property(lambda self: self.symbol.name)
def _eval_is_positive(self):
return self.symbol.is_positive
def _eval_is_integer(self):
return self.symbol.is_integer
def _eval_is_real(self):
return self.symbol.is_real or self.pspace.is_real
@property
def is_commutative(self):
return self.symbol.is_commutative
def _hashable_content(self):
return self.pspace, self.symbol
@property
def free_symbols(self):
return {self}
class ProductPSpace(PSpace):
"""
A probability space resulting from the merger of two independent probability
spaces.
    Often created using the ``pspace`` function.
"""
def __new__(cls, *spaces):
rs_space_dict = {}
for space in spaces:
for value in space.values:
rs_space_dict[value] = space
symbols = FiniteSet(*[val.symbol for val in rs_space_dict.keys()])
# Overlapping symbols
if len(symbols) < sum(len(space.symbols) for space in spaces):
raise ValueError("Overlapping Random Variables")
if all(space.is_Finite for space in spaces):
from sympy.stats.frv import ProductFinitePSpace
cls = ProductFinitePSpace
if all(space.is_Continuous for space in spaces):
from sympy.stats.crv import ProductContinuousPSpace
cls = ProductContinuousPSpace
obj = Basic.__new__(cls, *FiniteSet(*spaces))
return obj
@property
def rs_space_dict(self):
d = {}
for space in self.spaces:
for value in space.values:
d[value] = space
return d
@property
def symbols(self):
return FiniteSet(*[val.symbol for val in self.rs_space_dict.keys()])
@property
def spaces(self):
return FiniteSet(*self.args)
@property
def values(self):
return sumsets(space.values for space in self.spaces)
def integrate(self, expr, rvs=None, **kwargs):
rvs = rvs or self.values
rvs = frozenset(rvs)
for space in self.spaces:
expr = space.integrate(expr, rvs & space.values, **kwargs)
return expr
@property
def domain(self):
return ProductDomain(*[space.domain for space in self.spaces])
@property
def density(self):
raise NotImplementedError("Density not available for ProductSpaces")
def sample(self):
return dict([(k, v) for space in self.spaces
for k, v in space.sample().items()])
class ProductDomain(RandomDomain):
"""
A domain resulting from the merger of two independent domains
See Also
========
sympy.stats.crv.ProductContinuousDomain
sympy.stats.frv.ProductFiniteDomain
"""
is_ProductDomain = True
def __new__(cls, *domains):
symbols = sumsets([domain.symbols for domain in domains])
# Flatten any product of products
domains2 = []
for domain in domains:
if not domain.is_ProductDomain:
domains2.append(domain)
else:
domains2.extend(domain.domains)
domains2 = FiniteSet(*domains2)
if all(domain.is_Finite for domain in domains2):
from sympy.stats.frv import ProductFiniteDomain
cls = ProductFiniteDomain
if all(domain.is_Continuous for domain in domains2):
from sympy.stats.crv import ProductContinuousDomain
cls = ProductContinuousDomain
return Basic.__new__(cls, *domains2)
@property
def sym_domain_dict(self):
return dict((symbol, domain) for domain in self.domains
for symbol in domain.symbols)
@property
def symbols(self):
return FiniteSet(*[sym for domain in self.domains
for sym in domain.symbols])
@property
def domains(self):
return self.args
@property
def set(self):
return ProductSet(domain.set for domain in self.domains)
def __contains__(self, other):
# Split event into each subdomain
for domain in self.domains:
# Collect the parts of this event which associate to this domain
elem = frozenset([item for item in other
if sympify(domain.symbols.contains(item[0]))
is S.true])
# Test this sub-event
if elem not in domain:
return False
# All subevents passed
return True
def as_boolean(self):
return And(*[domain.as_boolean() for domain in self.domains])
def random_symbols(expr):
"""
Returns all RandomSymbols within a SymPy Expression.
"""
try:
return list(expr.atoms(RandomSymbol))
except AttributeError:
return []
def pspace(expr):
"""
Returns the underlying Probability Space of a random expression.
For internal use.
Examples
========
>>> from sympy.stats import pspace, Normal
>>> from sympy.stats.rv import ProductPSpace
>>> X = Normal('X', 0, 1)
>>> pspace(2*X + 1) == X.pspace
True
"""
expr = sympify(expr)
rvs = random_symbols(expr)
if not rvs:
raise ValueError("Expression containing Random Variable expected, not %s" % (expr))
# If only one space present
if all(rv.pspace == rvs[0].pspace for rv in rvs):
return rvs[0].pspace
# Otherwise make a product space
return ProductPSpace(*[rv.pspace for rv in rvs])
def sumsets(sets):
"""
Union of sets
"""
return frozenset().union(*sets)
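# For example (a small sketch): with random symbols X and Y,
# sumsets([{X}, {Y}]) == frozenset({X, Y}); the helper is used below to merge
# the value/symbol sets of several spaces or domains.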
def rs_swap(a, b):
"""
Build a dictionary to swap RandomSymbols based on their underlying symbol.
i.e.
if ``X = ('x', pspace1)``
and ``Y = ('x', pspace2)``
then ``X`` and ``Y`` match and the key, value pair
``{X:Y}`` will appear in the result
Inputs: collections a and b of random variables which share common symbols
Output: dict mapping RVs in a to RVs in b
"""
d = {}
for rsa in a:
d[rsa] = [rsb for rsb in b if rsa.symbol == rsb.symbol][0]
return d
def given(expr, condition=None, **kwargs):
""" Conditional Random Expression
From a random expression and a condition on that expression creates a new
probability space from the condition and returns the same expression on that
conditional probability space.
Examples
========
>>> from sympy.stats import given, density, Die
>>> X = Die('X', 6)
>>> Y = given(X, X > 3)
>>> density(Y).dict
{4: 1/3, 5: 1/3, 6: 1/3}
Following convention, if the condition is a random symbol then that symbol
is considered fixed.
>>> from sympy.stats import Normal
>>> from sympy import pprint
>>> from sympy.abc import z
>>> X = Normal('X', 0, 1)
>>> Y = Normal('Y', 0, 1)
>>> pprint(density(X + Y, Y)(z), use_unicode=False)
2
-(-Y + z)
-----------
___ 2
\/ 2 *e
------------------
____
2*\/ pi
"""
if not random_symbols(condition) or pspace_independent(expr, condition):
return expr
if isinstance(condition, RandomSymbol):
condition = Eq(condition, condition.symbol)
condsymbols = random_symbols(condition)
if (isinstance(condition, Equality) and len(condsymbols) == 1 and
not isinstance(pspace(expr).domain, ConditionalDomain)):
rv = tuple(condsymbols)[0]
results = solveset(condition, rv)
if isinstance(results, Intersection) and S.Reals in results.args:
results = list(results.args[1])
return sum(expr.subs(rv, res) for res in results)
# Get full probability space of both the expression and the condition
fullspace = pspace(Tuple(expr, condition))
# Build new space given the condition
space = fullspace.conditional_space(condition, **kwargs)
# Dictionary to swap out RandomSymbols in expr with new RandomSymbols
# That point to the new conditional space
swapdict = rs_swap(fullspace.values, space.values)
# Swap random variables in the expression
expr = expr.xreplace(swapdict)
return expr
def expectation(expr, condition=None, numsamples=None, evaluate=True, **kwargs):
"""
Returns the expected value of a random expression
Parameters
==========
expr : Expr containing RandomSymbols
The expression of which you want to compute the expectation value
    condition : Expr containing RandomSymbols
        A conditional expression. E(X, X > 0) is the expectation of X given X > 0
numsamples : int
Enables sampling and approximates the expectation with this many samples
    evalf : Bool (defaults to True)
        If sampling, return a number rather than a complex expression
    evaluate : Bool (defaults to True)
        If False, return an unevaluated integral (for continuous systems)
Examples
========
>>> from sympy.stats import E, Die
>>> X = Die('X', 6)
>>> E(X)
7/2
>>> E(2*X + 1)
8
>>> E(X, X > 3) # Expectation of X given that it is above 3
5
"""
if not random_symbols(expr): # expr isn't random?
return expr
if numsamples: # Computing by monte carlo sampling?
return sampling_E(expr, condition, numsamples=numsamples)
# Create new expr and recompute E
if condition is not None: # If there is a condition
return expectation(given(expr, condition), evaluate=evaluate)
# A few known statements for efficiency
if expr.is_Add: # We know that E is Linear
return Add(*[expectation(arg, evaluate=evaluate)
for arg in expr.args])
# Otherwise case is simple, pass work off to the ProbabilitySpace
result = pspace(expr).integrate(expr)
if evaluate and hasattr(result, 'doit'):
return result.doit(**kwargs)
else:
return result
def probability(condition, given_condition=None, numsamples=None,
evaluate=True, **kwargs):
"""
Probability that a condition is true, optionally given a second condition
Parameters
==========
condition : Combination of Relationals containing RandomSymbols
The condition of which you want to compute the probability
given_condition : Combination of Relationals containing RandomSymbols
        A conditional expression. P(X > 1, X > 0) is the probability of X > 1
        given X > 0
numsamples : int
Enables sampling and approximates the probability with this many samples
    evaluate : Bool (defaults to True)
        If False, return an unevaluated integral (for continuous systems)
Examples
========
>>> from sympy.stats import P, Die
>>> from sympy import Eq
>>> X, Y = Die('X', 6), Die('Y', 6)
>>> P(X > 3)
1/2
>>> P(Eq(X, 5), X > 2) # Probability that X == 5 given that X > 2
1/4
>>> P(X > Y)
5/12
"""
condition = sympify(condition)
given_condition = sympify(given_condition)
if given_condition is not None and \
not isinstance(given_condition, (Relational, Boolean)):
raise ValueError("%s is not a relational or combination of relationals"
% (given_condition))
if given_condition == False:
return S.Zero
if not isinstance(condition, (Relational, Boolean)):
raise ValueError("%s is not a relational or combination of relationals"
% (condition))
if condition is S.true:
return S.One
if condition is S.false:
return S.Zero
if numsamples:
return sampling_P(condition, given_condition, numsamples=numsamples,
**kwargs)
if given_condition is not None: # If there is a condition
# Recompute on new conditional expr
return probability(given(condition, given_condition, **kwargs), **kwargs)
# Otherwise pass work off to the ProbabilitySpace
result = pspace(condition).probability(condition, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
class Density(Basic):
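    """
    An unevaluated probability density.
    ``Density(expr, condition)`` simply stores a random expression and an
    optional condition; the actual computation happens lazily in ``doit``,
    which the public ``density`` function below calls.
    """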
expr = property(lambda self: self.args[0])
@property
def condition(self):
if len(self.args) > 1:
return self.args[1]
else:
return None
def doit(self, evaluate=True, **kwargs):
expr, condition = self.expr, self.condition
if condition is not None:
# Recompute on new conditional expr
expr = given(expr, condition, **kwargs)
if not random_symbols(expr):
return Lambda(x, DiracDelta(x - expr))
if (isinstance(expr, RandomSymbol) and
hasattr(expr.pspace, 'distribution') and
isinstance(pspace(expr), SinglePSpace)):
return expr.pspace.distribution
result = pspace(expr).compute_density(expr, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def density(expr, condition=None, evaluate=True, numsamples=None, **kwargs):
"""
Probability density of a random expression, optionally given a second
condition.
This density will take on different forms for different types of
probability spaces. Discrete variables produce Dicts. Continuous
variables produce Lambdas.
Parameters
==========
expr : Expr containing RandomSymbols
The expression of which you want to compute the density value
condition : Relational containing RandomSymbols
A conditional expression. density(X > 1, X > 0) is density of X > 1
given X > 0
numsamples : int
Enables sampling and approximates the density with this many samples
Examples
========
>>> from sympy.stats import density, Die, Normal
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> D = Die('D', 6)
>>> X = Normal(x, 0, 1)
>>> density(D).dict
{1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}
>>> density(2*D).dict
{2: 1/6, 4: 1/6, 6: 1/6, 8: 1/6, 10: 1/6, 12: 1/6}
>>> density(X)(x)
sqrt(2)*exp(-x**2/2)/(2*sqrt(pi))
"""
if numsamples:
return sampling_density(expr, condition, numsamples=numsamples,
**kwargs)
return Density(expr, condition).doit(evaluate=evaluate, **kwargs)
def cdf(expr, condition=None, evaluate=True, **kwargs):
"""
    Cumulative Distribution Function of a random expression,
    optionally given a second condition.
This density will take on different forms for different types of
probability spaces.
Discrete variables produce Dicts.
Continuous variables produce Lambdas.
Examples
========
>>> from sympy.stats import density, Die, Normal, cdf
>>> from sympy import Symbol
>>> D = Die('D', 6)
>>> X = Normal('X', 0, 1)
>>> density(D).dict
{1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}
>>> cdf(D)
{1: 1/6, 2: 1/3, 3: 1/2, 4: 2/3, 5: 5/6, 6: 1}
>>> cdf(3*D, D > 2)
{9: 1/4, 12: 1/2, 15: 3/4, 18: 1}
>>> cdf(X)
Lambda(_z, -erfc(sqrt(2)*_z/2)/2 + 1)
"""
if condition is not None: # If there is a condition
# Recompute on new conditional expr
return cdf(given(expr, condition, **kwargs), **kwargs)
# Otherwise pass work off to the ProbabilitySpace
result = pspace(expr).compute_cdf(expr, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def where(condition, given_condition=None, **kwargs):
"""
Returns the domain where a condition is True.
Examples
========
>>> from sympy.stats import where, Die, Normal
>>> from sympy import symbols, And
>>> D1, D2 = Die('a', 6), Die('b', 6)
>>> a, b = D1.symbol, D2.symbol
>>> X = Normal('x', 0, 1)
>>> where(X**2<1)
Domain: And(-1 < x, x < 1)
>>> where(X**2<1).set
(-1, 1)
>>> where(And(D1<=D2 , D2<3))
Domain: Or(And(Eq(a, 1), Eq(b, 1)), And(Eq(a, 1), Eq(b, 2)), And(Eq(a, 2), Eq(b, 2))) """
if given_condition is not None: # If there is a condition
# Recompute on new conditional expr
return where(given(condition, given_condition, **kwargs), **kwargs)
# Otherwise pass work off to the ProbabilitySpace
return pspace(condition).where(condition, **kwargs)
def sample(expr, condition=None, **kwargs):
"""
A realization of the random expression
Examples
========
>>> from sympy.stats import Die, sample
>>> X, Y, Z = Die('X', 6), Die('Y', 6), Die('Z', 6)
>>> die_roll = sample(X + Y + Z) # A random realization of three dice
"""
return next(sample_iter(expr, condition, numsamples=1))
def sample_iter(expr, condition=None, numsamples=S.Infinity, **kwargs):
"""
Returns an iterator of realizations from the expression given a condition
expr: Random expression to be realized
condition: A conditional expression (optional)
numsamples: Length of the iterator (defaults to infinity)
Examples
========
>>> from sympy.stats import Normal, sample_iter
>>> X = Normal('X', 0, 1)
>>> expr = X*X + 3
>>> iterator = sample_iter(expr, numsamples=3)
>>> list(iterator) # doctest: +SKIP
[12, 4, 7]
See Also
========
Sample
sampling_P
sampling_E
sample_iter_lambdify
sample_iter_subs
"""
# lambdify is much faster but not as robust
try:
return sample_iter_lambdify(expr, condition, numsamples, **kwargs)
# use subs when lambdify fails
except TypeError:
return sample_iter_subs(expr, condition, numsamples, **kwargs)
def sample_iter_lambdify(expr, condition=None, numsamples=S.Infinity, **kwargs):
"""
See sample_iter
Uses lambdify for computation. This is fast but does not always work.
"""
if condition:
ps = pspace(Tuple(expr, condition))
else:
ps = pspace(expr)
rvs = list(ps.values)
fn = lambdify(rvs, expr, **kwargs)
if condition:
given_fn = lambdify(rvs, condition, **kwargs)
# Check that lambdify can handle the expression
# Some operations like Sum can prove difficult
try:
d = ps.sample() # a dictionary that maps RVs to values
args = [d[rv] for rv in rvs]
fn(*args)
if condition:
given_fn(*args)
except Exception:
raise TypeError("Expr/condition too complex for lambdify")
def return_generator():
count = 0
while count < numsamples:
d = ps.sample() # a dictionary that maps RVs to values
args = [d[rv] for rv in rvs]
if condition: # Check that these values satisfy the condition
gd = given_fn(*args)
if gd != True and gd != False:
raise ValueError(
"Conditions must not contain free symbols")
if not gd: # If the values don't satisfy then try again
continue
yield fn(*args)
count += 1
return return_generator()
def sample_iter_subs(expr, condition=None, numsamples=S.Infinity, **kwargs):
"""
See sample_iter
Uses subs for computation. This is slow but almost always works.
"""
if condition is not None:
ps = pspace(Tuple(expr, condition))
else:
ps = pspace(expr)
count = 0
while count < numsamples:
d = ps.sample() # a dictionary that maps RVs to values
if condition is not None: # Check that these values satisfy the condition
gd = condition.xreplace(d)
if gd != True and gd != False:
raise ValueError("Conditions must not contain free symbols")
if not gd: # If the values don't satisfy then try again
continue
yield expr.xreplace(d)
count += 1
def sampling_P(condition, given_condition=None, numsamples=1,
evalf=True, **kwargs):
"""
Sampling version of P
See Also
========
P
sampling_E
sampling_density
"""
count_true = 0
count_false = 0
samples = sample_iter(condition, given_condition,
numsamples=numsamples, **kwargs)
for x in samples:
if x != True and x != False:
raise ValueError("Conditions must not contain free symbols")
if x:
count_true += 1
else:
count_false += 1
result = S(count_true) / numsamples
if evalf:
return result.evalf()
else:
return result
def sampling_E(expr, given_condition=None, numsamples=1,
evalf=True, **kwargs):
"""
Sampling version of E
See Also
========
P
sampling_P
sampling_density
"""
samples = sample_iter(expr, given_condition,
numsamples=numsamples, **kwargs)
result = Add(*list(samples)) / numsamples
if evalf:
return result.evalf()
else:
return result
def sampling_density(expr, given_condition=None, numsamples=1, **kwargs):
"""
Sampling version of density
See Also
========
density
sampling_P
sampling_E
"""
results = {}
for result in sample_iter(expr, given_condition,
numsamples=numsamples, **kwargs):
results[result] = results.get(result, 0) + 1
return results
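# For example, sampling_density(X, numsamples=100) returns a dict mapping each
# sampled value of X to the number of times it occurred among the 100 draws --
# a raw histogram rather than a normalized density.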
def dependent(a, b):
"""
Dependence of two random expressions
Two expressions are independent if knowledge of one does not change
computations on the other.
Examples
========
>>> from sympy.stats import Normal, dependent, given
>>> from sympy import Tuple, Eq
>>> X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
>>> dependent(X, Y)
False
>>> dependent(2*X + Y, -Y)
True
>>> X, Y = given(Tuple(X, Y), Eq(X + Y, 3))
>>> dependent(X, Y)
True
See Also
========
independent
"""
if pspace_independent(a, b):
return False
z = Symbol('z', real=True)
# Dependent if density is unchanged when one is given information about
# the other
return (density(a, Eq(b, z)) != density(a) or
density(b, Eq(a, z)) != density(b))
def independent(a, b):
"""
Independence of two random expressions
Two expressions are independent if knowledge of one does not change
computations on the other.
Examples
========
>>> from sympy.stats import Normal, independent, given
>>> from sympy import Tuple, Eq
>>> X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
>>> independent(X, Y)
True
>>> independent(2*X + Y, -Y)
False
>>> X, Y = given(Tuple(X, Y), Eq(X + Y, 3))
>>> independent(X, Y)
False
See Also
========
dependent
"""
return not dependent(a, b)
def pspace_independent(a, b):
"""
Tests for independence between a and b by checking if their PSpaces have
overlapping symbols. This is a sufficient but not necessary condition for
independence and is intended to be used internally.
Notes
=====
pspace_independent(a, b) implies independent(a, b)
independent(a, b) does not imply pspace_independent(a, b)
"""
a_symbols = set(pspace(b).symbols)
b_symbols = set(pspace(a).symbols)
if len(a_symbols.intersection(b_symbols)) == 0:
return True
return None
def rv_subs(expr, symbols=None):
"""
Given a random expression replace all random variables with their symbols.
If symbols keyword is given restrict the swap to only the symbols listed.
"""
if symbols is None:
symbols = random_symbols(expr)
if not symbols:
return expr
swapdict = {rv: rv.symbol for rv in symbols}
return expr.xreplace(swapdict)
class NamedArgsMixin(object):
_argnames = ()
def __getattr__(self, attr):
try:
return self.args[self._argnames.index(attr)]
except ValueError:
            raise AttributeError("'%s' object has no attribute '%s'" % (
type(self).__name__, attr))
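# A sketch of how NamedArgsMixin is intended to be used (the class and
# argument names here are illustrative, not taken from this file):
#
#     class SomeDistribution(Basic, NamedArgsMixin):
#         _argnames = ('mean', 'std')
#
#     # SomeDistribution(mu, sigma).mean then resolves to args[0], i.e. mu.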
def _value_check(condition, message):
"""
Check a condition on input value.
Raises ValueError with message if condition is not True
"""
if condition == False:
raise ValueError(message)
| {
"content_hash": "1f20d8b0d83e5c1433bd7c197ada2817",
"timestamp": "",
"source": "github",
"line_count": 1107,
"max_line_length": 96,
"avg_line_length": 27.883468834688347,
"alnum_prop": 0.6135030939190721,
"repo_name": "rahuldan/sympy",
"id": "89d705d1aed1ccca9420c4723eca36397b984ebf",
"size": "30867",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "sympy/stats/rv.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14733352"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "5952"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "XSLT",
"bytes": "366200"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
import pkg_resources
from neutron._i18n import _
MIGRATION_ENTRYPOINTS = 'neutron.db.alembic_migrations'
migration_entrypoints = {
entrypoint.name: entrypoint
for entrypoint in pkg_resources.iter_entry_points(MIGRATION_ENTRYPOINTS)
}
INSTALLED_SUBPROJECTS = [project_ for project_ in migration_entrypoints]
CORE_OPTS = [
cfg.StrOpt('subproject',
choices=INSTALLED_SUBPROJECTS,
help=(_("The subproject to execute the command against. "
"Can be one of: '%s'.")
% "', '".join(INSTALLED_SUBPROJECTS))),
cfg.BoolOpt('split_branches',
default=True,
deprecated_for_removal=True,
                help=_("DEPRECATED in newton, will be removed in ocata. "
"Alembic environments integrating with "
"Neutron must implement split (contract and expand) "
"branches file structure."))
]
DB_OPTS = [
cfg.StrOpt('connection',
default='',
secret=True,
help=_('URL to database')),
cfg.StrOpt('engine',
default='',
help=_('Database engine for which script will be generated '
'when using offline migration.')),
]
def register_db_cli_opts(conf):
conf.register_cli_opts(CORE_OPTS)
conf.register_cli_opts(DB_OPTS, 'database')
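# A minimal usage sketch (assuming a fresh oslo.config ConfigOpts instance;
# the 'neutron' project name below is illustrative):
#
#     conf = cfg.ConfigOpts()
#     register_db_cli_opts(conf)
#     conf(project='neutron')          # parse CLI args / config files
#     print(conf.subproject, conf.database.connection)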
| {
"content_hash": "754779412554f79c6f7ee5655b0d5b45",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 76,
"avg_line_length": 32.77272727272727,
"alnum_prop": 0.5818307905686546,
"repo_name": "huntxu/neutron",
"id": "b02f177ce3eefcd2a16d5b0b555fbfabf94d2e9f",
"size": "2015",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/conf/db/migration_cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "11111676"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
"""
Example Airflow DAG that shows interactions with Google Cloud Firestore.
Prerequisites
=============
This example uses two Google Cloud projects:
* ``GCP_PROJECT_ID`` - It contains a bucket and a Firestore database.
* ``G_FIRESTORE_PROJECT_ID`` - It contains the Data Warehouse based on the BigQuery service.
Saving in a bucket should be possible from the ``G_FIRESTORE_PROJECT_ID`` project.
Reading from a bucket should be possible from the ``GCP_PROJECT_ID`` project.
The bucket and dataset should be located in the same region.
If you want to run this example, you must do the following:
1. Create Google Cloud project and enable the BigQuery API
2. Create the Firebase project
3. Create a bucket in the same location as the Firebase project
4. Grant Firebase admin account permissions to manage BigQuery. This is required to create a dataset.
5. Create a bucket in the Firebase project.
6. Give the Firebase admin read/write access to the bucket created in step 5.
7. Create a collection in the Firestore database.
"""
from __future__ import annotations
import os
from datetime import datetime
from urllib.parse import urlparse
from airflow import models
from airflow.providers.google.cloud.operators.bigquery import (
BigQueryCreateEmptyDatasetOperator,
BigQueryCreateExternalTableOperator,
BigQueryDeleteDatasetOperator,
BigQueryInsertJobOperator,
)
from airflow.providers.google.cloud.operators.gcs import GCSCreateBucketOperator, GCSDeleteBucketOperator
from airflow.providers.google.firebase.operators.firestore import CloudFirestoreExportDatabaseOperator
from airflow.utils.trigger_rule import TriggerRule
ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
DAG_ID = "example_google_firestore"
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-gcp-project")
FIRESTORE_PROJECT_ID = os.environ.get("G_FIRESTORE_PROJECT_ID", "example-firebase-project")
BUCKET_NAME = f"bucket_{DAG_ID}_{ENV_ID}"
DATASET_NAME = f"dataset_{DAG_ID}_{ENV_ID}"
EXPORT_DESTINATION_URL = os.environ.get("GCP_FIRESTORE_ARCHIVE_URL", "gs://INVALID BUCKET NAME/namespace/")
EXPORT_PREFIX = urlparse(EXPORT_DESTINATION_URL).path
EXPORT_COLLECTION_ID = os.environ.get("GCP_FIRESTORE_COLLECTION_ID", "firestore_collection_id")
DATASET_LOCATION = os.environ.get("GCP_FIRESTORE_DATASET_LOCATION", "EU")
if BUCKET_NAME is None:
raise ValueError("Bucket name is required. Please set GCP_FIRESTORE_ARCHIVE_URL env variable.")
with models.DAG(
DAG_ID,
start_date=datetime(2021, 1, 1),
schedule="@once",
catchup=False,
tags=["example", "firestore"],
) as dag:
create_bucket = GCSCreateBucketOperator(task_id="create_bucket", bucket_name=BUCKET_NAME)
create_dataset = BigQueryCreateEmptyDatasetOperator(
task_id="create_dataset",
dataset_id=DATASET_NAME,
location=DATASET_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [START howto_operator_export_database_to_gcs]
export_database_to_gcs = CloudFirestoreExportDatabaseOperator(
task_id="export_database_to_gcs",
project_id=FIRESTORE_PROJECT_ID,
body={"outputUriPrefix": EXPORT_DESTINATION_URL, "collectionIds": [EXPORT_COLLECTION_ID]},
)
# [END howto_operator_export_database_to_gcs]
# [START howto_operator_create_external_table_multiple_types]
create_external_table_multiple_types = BigQueryCreateExternalTableOperator(
task_id="create_external_table",
bucket=BUCKET_NAME,
table_resource={
"tableReference": {
"projectId": GCP_PROJECT_ID,
"datasetId": DATASET_NAME,
"tableId": "firestore_data",
},
"schema": {
"fields": [
{"name": "name", "type": "STRING"},
{"name": "post_abbr", "type": "STRING"},
]
},
"externalDataConfiguration": {
"sourceFormat": "DATASTORE_BACKUP",
"compression": "NONE",
"csvOptions": {"skipLeadingRows": 1},
},
},
)
# [END howto_operator_create_external_table_multiple_types]
read_data_from_gcs_multiple_types = BigQueryInsertJobOperator(
task_id="execute_query",
configuration={
"query": {
"query": f"SELECT COUNT(*) FROM `{GCP_PROJECT_ID}.{DATASET_NAME}.firestore_data`",
"useLegacySql": False,
}
},
)
delete_dataset = BigQueryDeleteDatasetOperator(
task_id="delete_dataset",
dataset_id=DATASET_NAME,
project_id=GCP_PROJECT_ID,
delete_contents=True,
trigger_rule=TriggerRule.ALL_DONE,
)
delete_bucket = GCSDeleteBucketOperator(
task_id="delete_bucket", bucket_name=BUCKET_NAME, trigger_rule=TriggerRule.ALL_DONE
)
(
# TEST SETUP
create_bucket
>> create_dataset
# TEST BODY
>> export_database_to_gcs
>> create_external_table_multiple_types
>> read_data_from_gcs_multiple_types
# TEST TEARDOWN
>> delete_dataset
>> delete_bucket
)
from tests.system.utils.watcher import watcher
# This test needs watcher in order to properly mark success/failure
# when "tearDown" task with trigger rule is part of the DAG
list(dag.tasks) >> watcher()
from tests.system.utils import get_test_run # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
| {
"content_hash": "3097b0318cce3523a3fc51e3cd86ab43",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 107,
"avg_line_length": 36.209150326797385,
"alnum_prop": 0.6756317689530686,
"repo_name": "nathanielvarona/airflow",
"id": "90591c085ad51957f11e8d603096f582795729ef",
"size": "6327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/system/providers/google/cloud/gcs/example_firestore.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "70681"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173025"
},
{
"name": "JavaScript",
"bytes": "142848"
},
{
"name": "Jinja",
"bytes": "38895"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23169682"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211967"
},
{
"name": "TypeScript",
"bytes": "484556"
}
],
"symlink_target": ""
} |
"""
DVR Disaster Victim Registry
This module provides a view of those persons who have been affected by the disaster:
- they may be eligible to be placed on a beneficiary list to receive food, NFIs or cash-for-work from NGOs
- if they have suffered financial loss, they may be eligible for compensation from the government.
@ToDo
"""
module = "dvr"
if deployment_settings.has_module(module):
pass
# Resource
#resourcename = "resource"
#table = module + "_" + resourcename
#db.define_table(table,
# Field("name"),
# migrate=migrate)
| {
"content_hash": "5b8fcd3ab64ea278ddfa508d83864eea",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 110,
"avg_line_length": 31.3,
"alnum_prop": 0.645367412140575,
"repo_name": "ptressel/sahana-eden-madpub",
"id": "e3d3b69042cdcfff3fd114b496c3a45cc9234a54",
"size": "651",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "models/dvr.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "14896489"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "14827014"
},
{
"name": "Shell",
"bytes": "1171"
}
],
"symlink_target": ""
} |
"""
Example demonstrating how to show a quad. Like hello_quad1.py, but now
with Texture2D and VertexBuffer, and optionally using an ElementBuffer to
draw the vertices.
"""
import time
import numpy as np
from vispy import gloo
from vispy import app
# Create a texture
im1 = np.zeros((100, 100, 3), 'float32')
im1[:50, :, 0] = 1.0
im1[:, :50, 1] = 1.0
im1[50:, 50:, 2] = 1.0
# Create vertices and texture coords, combined in one array for high performance
vertex_data = np.zeros(4, dtype=[('a_position', np.float32, 3),
('a_texcoord', np.float32, 2)])
vertex_data['a_position'] = np.array([[-0.8, -0.8, 0.0], [+0.7, -0.7, 0.0],
[-0.7, +0.7, 0.0], [+0.8, +0.8, 0.0, ]])
vertex_data['a_texcoord'] = np.array([[0.0, 0.0], [0.0, 1.0],
[1.0, 0.0], [1.0, 1.0]])
# Create indices and an IndexBuffer for them
indices = np.array([0, 1, 2, 1, 2, 3], np.uint16)
indices_buffer = gloo.IndexBuffer(indices)
client_indices_buffer = gloo.IndexBuffer(indices)
VERT_SHADER = """ // simple vertex shader
attribute vec3 a_position;
attribute vec2 a_texcoord;
uniform float sizeFactor;
void main (void) {
// Pass tex coords
gl_TexCoord[0] = vec4(a_texcoord.x, a_texcoord.y, 0.0, 0.0);
// Calculate position
gl_Position = sizeFactor*vec4(a_position.x, a_position.y, a_position.z,
1.0/sizeFactor);
}
"""
FRAG_SHADER = """ // simple fragment shader
uniform sampler2D texture1;
void main()
{
gl_FragColor = texture2D(texture1, gl_TexCoord[0].st);
}
"""
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, keys='interactive')
# Create program
self._program = gloo.Program(VERT_SHADER, FRAG_SHADER)
# Create vertex buffer
self._vbo = gloo.VertexBuffer(vertex_data)
# Set uniforms, samplers, attributes
# We create one VBO with all vertex data (array of structures)
# and create two views from it for the attributes.
self._program['texture1'] = gloo.Texture2D(im1)
self._program.bind(self._vbo) # This does:
#self._program['a_position'] = self._vbo['a_position']
#self._program['a_texcoords'] = self._vbo['a_texcoords']
gloo.set_clear_color('white')
self._timer = app.Timer('auto', connect=self.update, start=True)
self.show()
def on_resize(self, event):
width, height = event.physical_size
gloo.set_viewport(0, 0, width, height)
def on_draw(self, event):
# Clear
gloo.clear()
# Draw
self._program['sizeFactor'] = 0.5 + np.sin(time.time() * 3) * 0.2
# Draw (pick one!)
# self._program.draw('triangle_strip')
self._program.draw('triangles', indices_buffer)
# self._program.draw('triangles', client_indices_buffer) # Not
# recommended
if __name__ == '__main__':
c = Canvas()
app.run()
| {
"content_hash": "b44a511ac941bb005811977d19447755",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 28.42452830188679,
"alnum_prop": 0.5867905741785596,
"repo_name": "dchilds7/Deysha-Star-Formation",
"id": "43e2aed6fb6ea269f56afaa599275277d8db2994",
"size": "3080",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "examples/basics/gloo/animate_shape.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "143081"
},
{
"name": "GLSL",
"bytes": "195460"
},
{
"name": "JavaScript",
"bytes": "5007"
},
{
"name": "Makefile",
"bytes": "1638"
},
{
"name": "PowerShell",
"bytes": "4078"
},
{
"name": "Python",
"bytes": "2467392"
}
],
"symlink_target": ""
} |
import importlib
import inspect
import os
import re
import sys
import tempfile
from io import StringIO
from pathlib import Path
from django.conf.urls import url
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import DatabaseError, connection
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin, patch_logger
from django.urls import reverse
from django.utils.functional import SimpleLazyObject
from django.utils.safestring import mark_safe
from django.utils.version import PY36
from django.views.debug import (
CLEANSED_SUBSTITUTE, CallableSettingWrapper, ExceptionReporter,
cleanse_setting, technical_500_response,
)
from ..views import (
custom_exception_reporter_filter_view, index_page,
multivalue_dict_key_error, non_sensitive_view, paranoid_view,
sensitive_args_function_caller, sensitive_kwargs_function_caller,
sensitive_method_view, sensitive_view,
)
class User:
def __str__(self):
return 'jacob'
class WithoutEmptyPathUrls:
urlpatterns = [url(r'url/$', index_page, name='url')]
class CallableSettingWrapperTests(SimpleTestCase):
""" Unittests for CallableSettingWrapper
"""
def test_repr(self):
class WrappedCallable:
def __repr__(self):
return "repr from the wrapped callable"
def __call__(self):
pass
actual = repr(CallableSettingWrapper(WrappedCallable()))
self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
class DebugViewTests(LoggingCaptureMixin, SimpleTestCase):
def test_files(self):
response = self.client.get('/raises/')
self.assertEqual(response.status_code, 500)
data = {
'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
}
response = self.client.post('/raises/', data)
self.assertContains(response, 'file_data.txt', status_code=500)
self.assertNotContains(response, 'haha', status_code=500)
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
# Ensure no 403.html template exists to test the default case.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
}])
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
# Set up a test 403.html template.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': [
('django.template.loaders.locmem.Loader', {
'403.html': 'This is a test template for a 403 error ({{ exception }}).',
}),
],
},
}])
def test_403_template(self):
response = self.client.get('/raises403/')
self.assertContains(response, 'test template', status_code=403)
self.assertContains(response, '(Insufficient Permissions).', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_raised_404(self):
response = self.client.get('/views/raises404/')
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
def test_404_not_in_urls(self):
response = self.client.get('/not-in-urls')
self.assertNotContains(response, "Raised by:", status_code=404)
self.assertContains(response, "Django tried these URL patterns", status_code=404)
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
# Pattern and view name of a RegexURLPattern appear.
self.assertContains(response, r"^regex-post/(?P<pk>[0-9]+)/$", status_code=404)
self.assertContains(response, "[name='regex-post']", status_code=404)
# Pattern and view name of a RoutePattern appear.
self.assertContains(response, r"path-post/<int:pk>/", status_code=404)
self.assertContains(response, "[name='path-post']", status_code=404)
@override_settings(ROOT_URLCONF=WithoutEmptyPathUrls)
def test_404_empty_path_not_in_urls(self):
response = self.client.get('/')
self.assertContains(response, "The empty path didn't match any of these.", status_code=404)
def test_technical_404(self):
response = self.client.get('/views/technical404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.technical404", status_code=404)
def test_classbased_technical_404(self):
response = self.client.get('/views/classbased404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.Http404View", status_code=404)
def test_non_l10ned_numeric_ids(self):
"""
        Numeric IDs and the line numbers in fancy traceback context blocks shouldn't be localized.
"""
with self.settings(DEBUG=True, USE_L10N=True):
response = self.client.get('/raises500/')
# We look for a HTML fragment of the form
# '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
self.assertContains(response, '<div class="context" id="', status_code=500)
match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
self.assertIsNotNone(match)
id_repr = match.group('id')
self.assertFalse(
re.search(b'[^c0-9]', id_repr),
"Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr.decode()
)
def test_template_exceptions(self):
try:
self.client.get(reverse('template_exception'))
except Exception:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertNotEqual(
raising_loc.find("raise Exception('boom')"), -1,
"Failed to find 'raise Exception' in last frame of "
"traceback, instead found: %s" % raising_loc
)
def test_template_loader_postmortem(self):
"""Tests for not existing file"""
template_name = "notfound.html"
with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
tempdir = os.path.dirname(tmpfile.name)
template_path = os.path.join(tempdir, template_name)
with override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [tempdir],
}]):
response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)
# Assert as HTML.
self.assertContains(
response,
'<li><code>django.template.loaders.filesystem.Loader</code>: '
'%s (Source does not exist)</li>' % os.path.join(tempdir, 'notfound.html'),
status_code=500,
html=True,
)
def test_no_template_source_loaders(self):
"""
Make sure if you don't specify a template, the debug view doesn't blow up.
"""
with self.assertRaises(TemplateDoesNotExist):
self.client.get('/render_no_template/')
@override_settings(ROOT_URLCONF='view_tests.default_urls')
def test_default_urlconf_template(self):
"""
        Make sure that the default URLconf template is shown instead
of the technical 404 page, if the user has not altered their
URLconf yet.
"""
response = self.client.get('/')
self.assertContains(
response,
"<h2>The install worked successfully! Congratulations!</h2>"
)
@override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
def test_regression_21530(self):
"""
Regression test for bug #21530.
If the admin app include is replaced with exactly one url
pattern, then the technical 404 template should be displayed.
The bug here was that an AttributeError caused a 500 response.
"""
response = self.client.get('/')
self.assertContains(
response,
"Page not found <span>(404)</span>",
status_code=404
)
class DebugViewQueriesAllowedTests(SimpleTestCase):
# May need a query to initialize MySQL connection
allow_database_queries = True
def test_handle_db_exception(self):
"""
Ensure the debug view works when a database exception is raised by
performing an invalid query and passing the exception to the debug view.
"""
with connection.cursor() as cursor:
try:
cursor.execute('INVALID SQL')
except DatabaseError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get('/'), *exc_info)
self.assertContains(response, 'OperationalError at /', status_code=500)
@override_settings(
DEBUG=True,
ROOT_URLCONF='view_tests.urls',
# No template directories are configured, so no templates will be found.
TEMPLATES=[{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
}],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with patch_logger('django.security.SuspiciousOperation', 'error'):
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_template_not_found_error(self):
# Raises a TemplateDoesNotExist exception and shows the debug view.
url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
response = self.client.get(url)
self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>jacob</p>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
self.assertIn('<p>No POST data</p>', html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError</h1>', html)
        self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_eol_support(self):
"""The ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
LINES = ['print %d' % i for i in range(1, 6)]
reporter = ExceptionReporter(None, None, None, None)
for newline in ['\n', '\r\n', '\r']:
fd, filename = tempfile.mkstemp(text=False)
os.write(fd, (newline.join(LINES) + newline).encode())
os.close(fd)
try:
self.assertEqual(
reporter._get_lines_from_file(filename, 3, 2),
(1, LINES[1:3], LINES[3], LINES[4:])
)
finally:
os.unlink(filename)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_reporting_of_nested_exceptions(self):
request = self.rf.get('/test_view/')
try:
try:
raise AttributeError(mark_safe('<p>Top level</p>'))
except AttributeError as explicit:
try:
raise ValueError(mark_safe('<p>Second exception</p>')) from explicit
except ValueError:
raise IndexError(mark_safe('<p>Final exception</p>'))
except Exception:
# Custom exception handler, just pass it into ExceptionReporter
exc_type, exc_value, tb = sys.exc_info()
explicit_exc = 'The above exception ({0}) was the direct cause of the following exception:'
implicit_exc = 'During handling of the above exception ({0}), another exception occurred:'
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
            # Each message appears twice on the page: once rendered as HTML,
            # once as plain text (for the pastebin).
self.assertEqual(2, html.count(explicit_exc.format('<p>Top level</p>')))
self.assertEqual(2, html.count(implicit_exc.format('<p>Second exception</p>')))
self.assertEqual(10, html.count('<p>Final exception</p>'))
text = reporter.get_traceback_text()
self.assertIn(explicit_exc.format('<p>Top level</p>'), text)
self.assertIn(implicit_exc.format('<p>Second exception</p>'), text)
self.assertEqual(3, text.count('<p>Final exception</p>'))
def test_reporting_frames_without_source(self):
try:
source = "def funcName():\n raise Error('Whoops')\nfuncName()"
namespace = {}
code = compile(source, 'generated', 'exec')
exec(code, namespace)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
last_frame = frames[-1]
self.assertEqual(last_frame['context_line'], '<source code not available>')
self.assertEqual(last_frame['filename'], 'generated')
self.assertEqual(last_frame['function'], 'funcName')
self.assertEqual(last_frame['lineno'], 2)
html = reporter.get_traceback_html()
self.assertIn('generated in funcName', html)
text = reporter.get_traceback_text()
self.assertIn('"generated" in funcName', text)
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_non_utf8_values_handling(self):
"Non-UTF-8 exceptions/values should not make the output generation choke."
try:
class NonUtf8Output(Exception):
def __repr__(self):
return b'EXC\xe9EXC'
somevar = b'VAL\xe9VAL' # NOQA
raise NonUtf8Output()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('VAL\\xe9VAL', html)
self.assertIn('EXC\\xe9EXC', html)
def test_local_variable_escaping(self):
"""Safe strings in local variables are escaped."""
try:
local = mark_safe('<p>Local variable</p>')
raise ValueError(local)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
html = ExceptionReporter(None, exc_type, exc_value, tb).get_traceback_html()
self.assertIn('<td class="code"><pre>'<p>Local variable</p>'</pre></td>', html)
def test_unprintable_values_handling(self):
"Unprintable values should not make the output generation choke."
try:
class OomOutput:
def __repr__(self):
raise MemoryError('OOM')
oomvalue = OomOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<td class="code"><pre>Error in formatting', html)
def test_too_large_values_handling(self):
"Large values should not create a large HTML."
large = 256 * 1024
repr_of_str_adds = len(repr(''))
try:
class LargeOutput:
def __repr__(self):
return repr('A' * large)
largevalue = LargeOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
            self.assertEqual(len(html) // 1024 // 128, 0)  # still fits in 128 KB
self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)
def test_encoding_error(self):
"""
A UnicodeError displays a portion of the problematic string. HTML in
safe strings is escaped.
"""
try:
mark_safe('abcdefghijkl<p>mnὀp</p>qrstuwxyz').encode('ascii')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h2>Unicode error hint</h2>', html)
self.assertIn('The string that could not be encoded/decoded was: ', html)
self.assertIn('<strong><p>mnὀp</p></strong>', html)
def test_unfrozen_importlib(self):
"""
importlib is not a frozen app, but its loader thinks it's frozen which
results in an ImportError. Refs #21443.
"""
try:
request = self.rf.get('/test_view/')
importlib.import_module('abc.def.invalid.name')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>%sError at /test_view/</h1>' % ('ModuleNotFound' if PY36 else 'Import'), html)
def test_ignore_traceback_evaluation_exceptions(self):
"""
Don't trip over exceptions generated by crafted objects when
evaluating them while cleansing (#24455).
"""
class BrokenEvaluation(Exception):
pass
def broken_setup():
raise BrokenEvaluation
request = self.rf.get('/test_view/')
broken_lazy = SimpleLazyObject(broken_setup)
try:
bool(broken_lazy)
except BrokenEvaluation:
exc_type, exc_value, tb = sys.exc_info()
self.assertIn(
"BrokenEvaluation",
ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(),
"Evaluation exception reason not mentioned in traceback"
)
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn("http://evil.com/", html)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
value = '<td>items</td><td class="code"><pre>'Oops'</pre></td>'
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(
'<td>items</td><td class="code"><pre><InMemoryUploadedFile: '
'items (application/octet-stream)></pre></td>',
html
)
        # COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML('<td>items</td><td class="code"><pre>'Oops'</pre></td>', html)
def test_exception_fetching_user(self):
"""
The error page can be rendered if the current user can't be retrieved
(such as when the database is unavailable).
"""
class ExceptionUser:
def __str__(self):
raise Exception()
request = self.rf.get('/test_view/')
request.user = ExceptionUser()
try:
raise ValueError('Oops')
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Oops</pre>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>[unable to retrieve the current user]</p>', html)
text = reporter.get_traceback_text()
self.assertIn('USER: [unable to retrieve the current user]', text)
class PlainTextReportTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError at /test_view/', text)
self.assertIn("Can't find my keys", text)
self.assertIn('Request Method:', text)
self.assertIn('Request URL:', text)
self.assertIn('USER: jacob', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request information:', text)
self.assertNotIn('Request data not supplied', text)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError', text)
self.assertIn("Can't find my keys", text)
self.assertNotIn('Request Method:', text)
self.assertNotIn('Request URL:', text)
self.assertNotIn('USER:', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request data not supplied', text)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
reporter.get_traceback_text()
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(DEBUG=True)
def test_template_exception(self):
request = self.rf.get('/test_view/')
try:
render(request, 'debug/template_error.html')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
templ_path = Path(Path(__file__).parent.parent, 'templates', 'debug', 'template_error.html')
self.assertIn(
'Template error:\n'
'In template %(path)s, error at line 2\n'
' \'cycle\' tag requires at least two arguments\n'
' 1 : Template with error:\n'
' 2 : {%% cycle %%} \n'
' 3 : ' % {'path': templ_path},
text
)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn('items = <InMemoryUploadedFile:', text)
        # COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("http://evil.com/", text)
class ExceptionReportTestMixin:
# Mixin used in the ExceptionReporterFilterTests and
# AjaxResponseExceptionReporterFilter tests below
breakfast_data = {'sausage-key': 'sausage-value',
'baked-beans-key': 'baked-beans-value',
'hash-brown-key': 'hash-brown-value',
'bacon-key': 'bacon-value'}
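    # For context, a sketch (not the actual view_tests code): the sensitive views
    # exercised by these mixin helpers presumably mark the 'sauce' variable and the
    # sausage/bacon POST parameters as sensitive with Django's debug decorators,
    # which is why verify_safe_response() below expects those values to be filtered
    # while the baked-beans/hash-brown values stay visible:
    #
    #     from django.views.decorators.debug import (
    #         sensitive_post_parameters, sensitive_variables)
    #
    #     @sensitive_variables('sauce')
    #     @sensitive_post_parameters('sausage-key', 'bacon-key')
    #     def sensitive_view(request):
    #         ...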
def verify_unsafe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# All variables are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertContains(response, k, status_code=500)
self.assertContains(response, v, status_code=500)
def verify_safe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Non-sensitive variable's name and value are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
# Sensitive variable's name is shown but not its value.
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# Non-sensitive POST parameters' values are shown.
self.assertContains(response, 'baked-beans-value', status_code=500)
self.assertContains(response, 'hash-brown-value', status_code=500)
# Sensitive POST parameters' values are not shown.
self.assertNotContains(response, 'sausage-value', status_code=500)
self.assertNotContains(response, 'bacon-value', status_code=500)
def verify_paranoid_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that no variables or POST parameters are displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Show variable names but not their values.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertNotContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# No POST parameters' values are shown.
self.assertNotContains(response, v, status_code=500)
def verify_unsafe_email(self, view, check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', '[email protected]')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertIn('worcestershire', body_html)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertIn(k, body_plain)
self.assertIn(v, body_plain)
self.assertIn(k, body_html)
self.assertIn(v, body_html)
def verify_safe_email(self, view, check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', '[email protected]')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertNotIn('worcestershire', body_html)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertIn(k, body_plain)
# Non-sensitive POST parameters' values are shown.
self.assertIn('baked-beans-value', body_plain)
self.assertIn('hash-brown-value', body_plain)
self.assertIn('baked-beans-value', body_html)
self.assertIn('hash-brown-value', body_html)
# Sensitive POST parameters' values are not shown.
self.assertNotIn('sausage-value', body_plain)
self.assertNotIn('bacon-value', body_plain)
self.assertNotIn('sausage-value', body_html)
self.assertNotIn('bacon-value', body_html)
def verify_paranoid_email(self, view):
"""
Asserts that no variables or POST parameters are displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', '[email protected]')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body = str(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
self.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body)
# No POST parameters' values are shown.
self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports (#14614).
"""
rf = RequestFactory()
def test_non_sensitive_request(self):
"""
        Everything (request info and frame variables) can be seen
in the default error reports for non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
def test_sensitive_request(self):
"""
Sensitive POST parameters and frame variables cannot be
seen in the default error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view)
self.verify_unsafe_email(sensitive_view)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view)
self.verify_safe_email(sensitive_view)
def test_paranoid_request(self):
"""
No POST parameters and frame variables can be seen in the
default error reports for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view)
self.verify_unsafe_email(paranoid_view)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view)
self.verify_paranoid_email(paranoid_view)
def test_multivalue_dict_key_error(self):
"""
#21098 -- Sensitive POST parameters cannot be seen in the
        error reports if request.POST['nonexistent_key'] throws an error.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(multivalue_dict_key_error)
self.verify_unsafe_email(multivalue_dict_key_error)
with self.settings(DEBUG=False):
self.verify_safe_response(multivalue_dict_key_error)
self.verify_safe_email(multivalue_dict_key_error)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
def test_sensitive_method(self):
"""
The sensitive_variables decorator works with object methods.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_safe_email(sensitive_method_view, check_for_POST_params=False)
def test_sensitive_function_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as arguments to the decorated
function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_args_function_caller)
self.verify_unsafe_email(sensitive_args_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)
def test_sensitive_function_keyword_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as keyword arguments to the
decorated function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_kwargs_function_caller)
self.verify_unsafe_email(sensitive_kwargs_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)
def test_callable_settings(self):
"""
Callable settings should not be evaluated in the debug page (#21345).
"""
def callable_setting():
return "This should not be displayed"
with self.settings(DEBUG=True, FOOBAR=callable_setting):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_callable_settings_forbidding_to_set_attributes(self):
"""
        Callable settings that forbid setting attributes should not break
the debug page (#23070).
"""
class CallableSettingWithSlots:
__slots__ = []
def __call__(self):
return "This should not be displayed"
with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_dict_setting_with_non_str_key(self):
"""
A dict setting containing a non-string key should not break the
debug page (#12744).
"""
with self.settings(DEBUG=True, FOOBAR={42: None}):
response = self.client.get('/raises500/')
self.assertContains(response, 'FOOBAR', status_code=500)
def test_sensitive_settings(self):
"""
The debug page should not show some sensitive settings
(password, secret key, ...).
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
def test_settings_with_sensitive_keys(self):
"""
The debug page should filter out some sensitive information found in
dict settings.
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
FOOBAR = {
setting: "should not be displayed",
'recursive': {setting: "should not be displayed"},
}
with self.settings(DEBUG=True, FOOBAR=FOOBAR):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
class AjaxResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports.
Here we specifically test the plain text 500 debug-only error page served
    when the request is detected as having been sent by JS code. We don't check
for (non)existence of frames vars in the traceback information section of
the response content because we don't include them in these error pages.
Refs #14614.
"""
rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')
def test_non_sensitive_request(self):
"""
        Request info can be seen in the default error reports for
non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
def test_sensitive_request(self):
"""
Sensitive POST parameters cannot be seen in the default
error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view, check_for_vars=False)
def test_paranoid_request(self):
"""
No POST parameters can be seen in the default error reports
for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view, check_for_vars=False)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
def test_ajax_response_encoding(self):
response = self.client.get('/raises500/', HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response['Content-Type'], 'text/plain; charset=utf-8')
class HelperFunctionTests(SimpleTestCase):
def test_cleanse_setting_basic(self):
self.assertEqual(cleanse_setting('TEST', 'TEST'), 'TEST')
self.assertEqual(cleanse_setting('PASSWORD', 'super_secret'), CLEANSED_SUBSTITUTE)
def test_cleanse_setting_ignore_case(self):
self.assertEqual(cleanse_setting('password', 'super_secret'), CLEANSED_SUBSTITUTE)
def test_cleanse_setting_recurses_in_dictionary(self):
initial = {'login': 'cooper', 'password': 'secret'}
expected = {'login': 'cooper', 'password': CLEANSED_SUBSTITUTE}
self.assertEqual(cleanse_setting('SETTING_NAME', initial), expected)
| {
"content_hash": "51720d55c5e659bd1ef97e3c9eb6ccff",
"timestamp": "",
"source": "github",
"line_count": 1154,
"max_line_length": 117,
"avg_line_length": 43.30069324090121,
"alnum_prop": 0.6158218095219036,
"repo_name": "jpic/django",
"id": "e67d392d64fd3ccff2af46e9016ba1dd44f7e471",
"size": "49973",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tests/view_tests/tests/test_debug.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "84168"
},
{
"name": "HTML",
"bytes": "224612"
},
{
"name": "JavaScript",
"bytes": "255642"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12358598"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('images', '0014_auto_20180629_2250'),
]
operations = [
migrations.AddField(
model_name='imageset',
name='pinned_by',
field=models.ManyToManyField(related_name='pinned_sets', to=settings.AUTH_USER_MODEL),
),
]
| {
"content_hash": "5fedecae9a121d4ba6ab9e662427b4c5",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 98,
"avg_line_length": 27.055555555555557,
"alnum_prop": 0.6365503080082136,
"repo_name": "bit-bots/imagetagger",
"id": "65060c6742b1ef23169958bbe45e3568215ecd99",
"size": "536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/imagetagger/images/migrations/0015_imageset_pinned_by.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12288"
},
{
"name": "Dockerfile",
"bytes": "2049"
},
{
"name": "HTML",
"bytes": "273837"
},
{
"name": "JavaScript",
"bytes": "234939"
},
{
"name": "Python",
"bytes": "252248"
},
{
"name": "Shell",
"bytes": "299"
}
],
"symlink_target": ""
} |
'''
//----------------------------------------------------------------------------
// Name: msp6info.py
// Purpose: Information for msp6prog about button.
//
// Author: Watson Huang <[email protected]>
//
// Created: 01/19, 2015
// Copyright: (c) 2015 by Watson Huang
// License: MIT License
//----------------------------------------------------------------------------
'''
iname = "MSP6+ Configure Tool";
iversion = "v0.1";
icopyright = "\
(c) 2015 Watson Huang for MSP6+ Configure Tool\n \
(c) Xilinx belongs to Xilinx Inc.\n \
(c) FT2232H belongs to FTDI Ltd.\n \
(c) MiniSpartan6+ belongs to Scarab Hardware."
iwebsite = ("https://github.com/wats0n/msp6prog", "MSP6+ Configure Tool on Github.com");
ideveloper = ["Watson Huang <[email protected]>"]
ilicense = " \
The MIT License (MIT)\n\
Copyright (c) 2015 Watson Huang\n\
Permission is hereby granted, free of charge, to any person obtaining a copy \
of this software and associated documentation files (the \"Software\"), to deal \
in the Software without restriction, including without limitation the rights \
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell \
copies of the Software, and to permit persons to whom the Software is \
furnished to do so, subject to the following conditions: \
The above copyright notice and this permission notice shall be included in \
all copies or substantial portions of the Software.\n\
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR \
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE \
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER \
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN \
THE SOFTWARE.";
idescription = "\
This program is a side-project for understanding \
how to control FTDI FT2232H to operate Xilinx Spartan6.\
In order to learning and implement the MITx 6.004.2x Computation Structures: Programmable Architectures (2015 Q3).\
Main target is to design a Hi-Speed (USB2.0, 480MBps) interface form computer to Spartan6. \n\
There are three stages: \n\
1. Configure FPGA via FT2232H.\n\
2. Program Flash through Spartan6.\n\
3. Perform Partial Reconfigure with this interface.\n\
Finished stage 1 by follow abstraction:\n\
1. Implement FT2232H Low-Level programming in DLL by Visual C++ 2008 Express.\n\
2. Layout window by wxPython and operate DLL by ctypes in Python.\n\
Stage 2 is underway but no clear schedule.\n\
It's done when it's done.\n\
-wats0n\
";
| {
"content_hash": "559890aed7eab4d1c3d9faa3071f6652",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 115,
"avg_line_length": 45.83050847457627,
"alnum_prop": 0.6963757396449705,
"repo_name": "wats0n/msp6prog",
"id": "3124bfbb512f3e8fc96807568075d1182d28d38e",
"size": "2704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "msp6info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12480"
}
],
"symlink_target": ""
} |
from unittest import TestCase
import numpy
from six.moves import xrange
import theano
from theano.tests import unittest_tools as utt
import theano.sandbox.rng_mrg
from ..basic_ops import GpuFromHost, HostFromGpu
from ..elemwise import GpuElemwise
from .config import mode_with_gpu, test_ctx_name
class T_Scan(TestCase):
def setUp(self):
utt.seed_rng()
def test_one_sequence_one_output_weights_gpu1(self):
def f_rnn(u_t, x_tm1, W_in, W):
return u_t * W_in + x_tm1 * W
u = theano.tensor.fvector('u')
x0 = theano.tensor.fscalar('x0')
W_in = theano.tensor.fscalar('win')
W = theano.tensor.fscalar('w')
mode = mode_with_gpu.excluding('InputToGpuOptimizer')
output, updates = theano.scan(f_rnn,
u,
x0,
[W_in, W],
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=mode)
output = GpuFromHost(test_ctx_name)(output)
f2 = theano.function([u, x0, W_in, W],
output,
updates=updates,
allow_input_downcast=True,
mode=mode)
rng = numpy.random.RandomState(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5., high=5.)
v_x0 = rng.uniform()
W = rng.uniform()
W_in = rng.uniform()
v_u = numpy.asarray(v_u, dtype='float32')
v_x0 = numpy.asarray(v_x0, dtype='float32')
W = numpy.asarray(W, dtype='float32')
W_in = numpy.asarray(W_in, dtype='float32')
# compute the output in numpy
v_out = numpy.zeros((4,))
v_out[0] = v_u[0] * W_in + v_x0 * W
for step in xrange(1, 4):
v_out[step] = v_u[step] * W_in + v_out[step - 1] * W
theano_values = f2(v_u, v_x0, W_in, W)
utt.assert_allclose(theano_values, v_out)
# TO DEL
topo = f2.maker.fgraph.toposort()
scan_node = [node for node in topo
if isinstance(node.op, theano.scan_module.scan_op.Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
topo = f2.maker.fgraph.toposort()
assert sum([isinstance(node.op, HostFromGpu)
for node in topo]) == 0
assert sum([isinstance(node.op, GpuFromHost)
for node in topo]) == 4
scan_node = [node for node in topo
if isinstance(node.op, theano.scan_module.scan_op.Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
# check that there is no gpu transfer in the inner loop.
assert any([isinstance(node.op, GpuElemwise)
for node in scan_node_topo])
assert not any([isinstance(node.op, HostFromGpu)
for node in scan_node_topo])
assert not any([isinstance(node.op, GpuFromHost)
for node in scan_node_topo])
    # This second version tests the second case in the optimizer that transfers the computation to the GPU.
def test_one_sequence_one_output_weights_gpu2(self):
def f_rnn(u_t, x_tm1, W_in, W):
return u_t * W_in + x_tm1 * W
u = theano.tensor.fvector('u')
x0 = theano.tensor.fscalar('x0')
W_in = theano.tensor.fscalar('win')
W = theano.tensor.fscalar('w')
output, updates = theano.scan(f_rnn,
u,
x0,
[W_in, W],
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=mode_with_gpu)
f2 = theano.function([u, x0, W_in, W],
output,
updates=updates,
allow_input_downcast=True,
mode=mode_with_gpu)
# get random initial values
rng = numpy.random.RandomState(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5., high=5.)
v_x0 = rng.uniform()
W = rng.uniform()
W_in = rng.uniform()
# compute the output in numpy
v_out = numpy.zeros((4,))
v_out[0] = v_u[0] * W_in + v_x0 * W
for step in xrange(1, 4):
v_out[step] = v_u[step] * W_in + v_out[step - 1] * W
theano_values = f2(v_u, v_x0, W_in, W)
utt.assert_allclose(theano_values, v_out)
topo = f2.maker.fgraph.toposort()
assert sum([isinstance(node.op, HostFromGpu)
for node in topo]) == 1
assert sum([isinstance(node.op, GpuFromHost)
for node in topo]) == 4
scan_node = [node for node in topo
if isinstance(node.op, theano.scan_module.scan_op.Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
# check that there is no gpu transfer in the inner loop.
assert any([isinstance(node.op, GpuElemwise)
for node in scan_node_topo])
assert not any([isinstance(node.op, HostFromGpu)
for node in scan_node_topo])
assert not any([isinstance(node.op, GpuFromHost)
for node in scan_node_topo])
# This third test checks that scan can deal with a mixture of dtypes as
    # outputs when it is running on the GPU.
def test_gpu3_mixture_dtype_outputs(self):
def f_rnn(u_t, x_tm1, W_in, W):
return (u_t * W_in + x_tm1 * W,
theano.tensor.cast(u_t + x_tm1, 'int64'))
u = theano.tensor.fvector('u')
x0 = theano.tensor.fscalar('x0')
W_in = theano.tensor.fscalar('win')
W = theano.tensor.fscalar('w')
output, updates = theano.scan(f_rnn,
u,
[x0, None],
[W_in, W],
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=mode_with_gpu)
f2 = theano.function([u, x0, W_in, W],
output,
updates=updates,
allow_input_downcast=True,
mode=mode_with_gpu)
# get random initial values
rng = numpy.random.RandomState(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5., high=5.)
v_x0 = rng.uniform()
W = rng.uniform()
W_in = rng.uniform()
# compute the output in numpy
v_out1 = numpy.zeros((4,))
v_out2 = numpy.zeros((4,), dtype='int64')
v_out1[0] = v_u[0] * W_in + v_x0 * W
v_out2[0] = v_u[0] + v_x0
for step in xrange(1, 4):
v_out1[step] = v_u[step] * W_in + v_out1[step - 1] * W
v_out2[step] = numpy.int64(v_u[step] + v_out1[step - 1])
theano_out1, theano_out2 = f2(v_u, v_x0, W_in, W)
utt.assert_allclose(theano_out1, v_out1)
utt.assert_allclose(theano_out2, v_out2)
topo = f2.maker.fgraph.toposort()
scan_node = [node for node in topo
if isinstance(node.op, theano.scan_module.scan_op.Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
assert scan_node.op.gpua
scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
# check that there is no gpu transfer in the inner loop.
assert not any([isinstance(node.op, HostFromGpu)
for node in scan_node_topo])
assert not any([isinstance(node.op, GpuFromHost)
for node in scan_node_topo])
def test_gpu4_gibbs_chain(self):
rng = numpy.random.RandomState(utt.fetch_seed())
v_vsample = numpy.array(rng.binomial(1, .5, size=(3, 20),),
dtype='float32')
vsample = theano.shared(v_vsample)
trng = theano.sandbox.rng_mrg.MRG_RandomStreams(
utt.fetch_seed())
def f(vsample_tm1):
return trng.binomial(vsample_tm1.shape, n=1, p=0.3,
dtype='float32') * vsample_tm1
theano_vsamples, updates = theano.scan(f,
[],
vsample,
[],
n_steps=10,
truncate_gradient=-1,
go_backwards=False,
mode=mode_with_gpu)
my_f = theano.function([],
theano_vsamples[-1],
updates=updates,
allow_input_downcast=True,
mode=mode_with_gpu)
        # I leave this to be tested by debugmode; this test was anyway
        # more of a "does the graph compile" kind of test.
my_f()
| {
"content_hash": "328bf60fc554f02a90d79d62ab7dabf7",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 77,
"avg_line_length": 39.634854771784234,
"alnum_prop": 0.47634003350083753,
"repo_name": "cmdunkers/DeeperMind",
"id": "e6326987b6241bf3b7b222e188d94588efbc3512",
"size": "9552",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "PythonEnv/lib/python2.7/site-packages/theano/sandbox/gpuarray/tests/test_scan.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "653032"
},
{
"name": "C++",
"bytes": "3354338"
},
{
"name": "Cuda",
"bytes": "538188"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "HTML",
"bytes": "124328"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "22186197"
},
{
"name": "Shell",
"bytes": "4377"
}
],
"symlink_target": ""
} |
import os
import shutil
import time
from datetime import datetime
from itertools import groupby
from operator import itemgetter
# ============= enthought library imports =======================
from apptools.preferences.preference_binding import bind_preference
from git import Repo, GitCommandError, NoSuchPathError
from traits.api import Instance, Str, Set, List, provides, Bool, Int
from uncertainties import ufloat, std_dev, nominal_value
from pychron import json
from pychron.core.helpers.filetools import remove_extension, list_subdirectories, list_directory, add_extension
from pychron.core.helpers.iterfuncs import groupby_key, groupby_repo
from pychron.core.i_datastore import IDatastore
from pychron.core.progress import progress_loader, progress_iterator, open_progress
from pychron.dvc import dvc_dump, dvc_load, analysis_path, repository_path, AnalysisNotAnvailableError, PATH_MODIFIERS
from pychron.dvc.cache import DVCCache
from pychron.dvc.defaults import TRIGA, HOLDER_24_SPOKES, LASER221, LASER65
from pychron.dvc.dvc_analysis import DVCAnalysis
from pychron.dvc.dvc_database import DVCDatabase
from pychron.dvc.func import find_interpreted_age_path, GitSessionCTX, push_repositories, make_interpreted_age_dict
from pychron.dvc.meta_repo import MetaRepo, get_frozen_flux, get_frozen_productions
from pychron.dvc.tasks.dvc_preferences import DVCConnectionItem
from pychron.dvc.util import Tag, DVCInterpretedAge
from pychron.envisage.browser.record_views import InterpretedAgeRecordView
from pychron.git.hosts import IGitHost
from pychron.git.hosts.local import LocalGitHostService
from pychron.git_archive.repo_manager import GitRepoManager, format_date, get_repository_branch
from pychron.git_archive.views import StatusView
from pychron.globals import globalv
from pychron.loggable import Loggable
from pychron.paths import paths, r_mkdir
from pychron.processing.interpreted_age import InterpretedAge
from pychron.pychron_constants import RATIO_KEYS, INTERFERENCE_KEYS, STARTUP_MESSAGE_POSITION
HOST_WARNING_MESSAGE = 'GitLab or GitHub or LocalGit plugin is required'
@provides(IDatastore)
class DVC(Loggable):
"""
    Main interface to the DVC backend. Delegates responsibility to DVCDatabase and MetaRepo.
"""
db = Instance('pychron.dvc.dvc_database.DVCDatabase')
meta_repo = Instance('pychron.dvc.meta_repo.MetaRepo')
meta_repo_name = Str
meta_repo_dirname = Str
organization = Str
default_team = Str
current_repository = Instance(GitRepoManager)
auto_add = True
use_auto_pull = Bool(True)
pulled_repositories = Set
selected_repositories = List
data_sources = List
data_source = Instance(DVCConnectionItem)
favorites = List
update_currents_enabled = Bool
use_cocktail_irradiation = Str
use_cache = Bool
max_cache_size = Int
irradiation_prefix = Str
_cache = None
_uuid_runid_cache = {}
def __init__(self, bind=True, *args, **kw):
super(DVC, self).__init__(*args, **kw)
if bind:
self._bind_preferences()
def initialize(self, inform=False):
self.debug('Initialize DVC')
if not self.meta_repo_name:
self.warning_dialog('Need to specify Meta Repository name in Preferences',
position=STARTUP_MESSAGE_POSITION)
return
try:
self.open_meta_repo()
except BaseException as e:
self.warning('Error opening meta repo {}'.format(e))
return
# update meta repo.
self.meta_pull()
if self.db.connect():
return True
def fix_identifier(self, src_id, dest_id, repo, identifier, new_aliquot=None):
dry = True
repo = self._get_repository(repo)
# fix git files
root = paths.repository_dataset_dir
sp = analysis_path(src_id, repo, root=root)
dp = analysis_path(dest_id, repo, root=root, mode='w')
if os.path.isfile(dp):
self.info('Already an analysis. {} {}'.format(dest_id, dp))
return
if not os.path.isfile(sp):
self.info('not a file. {}'.format(sp))
return
jd = dvc_load(sp)
jd['identifier'] = identifier
if new_aliquot:
jd['aliquot'] = new_aliquot
self.debug('{}>>{}'.format(sp, dp))
if not dry:
repo.add(sp)
repo.add(dp)
dvc_dump(jd, dp)
os.remove(sp)
for modifier in ('baselines', 'blanks', 'extraction',
'intercepts', 'icfactors', 'peakcenter', '.data'):
sp = analysis_path(src_id, repo, modifier=modifier, root=root)
dp = analysis_path(dest_id, repo, modifier=modifier, root=root, mode='w')
self.debug('{}>>{}'.format(sp, dp))
if sp and os.path.isfile(sp):
if not dry:
repo.add(sp)
repo.add(dp)
shutil.move(sp, dp)
# fix database
# c
def generate_currents(self):
if not self.update_currents_enabled:
self.information_dialog('You must enable "Current Values" in Preferences/DVC')
return
if not self.confirmation_dialog('Are you sure you want to generate current values for the entire database? '
'This could take a while!'):
return
self.info('Generate currents started')
# group by repository
db = self.db
db.create_session()
ocoa = db.commit_on_add
db.commit_on_add = False
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
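        # e.g. list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]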
def func(ai, prog, i, n):
if prog:
if not i % 10:
prog.change_message('Updating Currents {} {}/{}'.format(ai.record_id, i, n))
else:
prog.increment()
ai.load_raw_data()
dban = db.get_analysis_uuid(ai.uuid)
if ai.analysis_type in ('unknown', 'cocktail'):
try:
self._update_current_age(ai, dban=dban, force=True)
except BaseException as e:
self.warning('Failed making current age for {}: {}'.format(ai.record_id, e))
if not ai.analysis_type.lower().startswith('blank'):
try:
self._update_current_blanks(ai, dban=dban, force=True, update_age=False, commit=False)
except BaseException as e:
self.warning('Failed making current blanks for {}: {}'.format(ai.record_id, e))
try:
self._update_current(ai, dban=dban, force=True, update_age=False, commit=False)
except BaseException as e:
self.warning('Failed making intensities for {}: {}'.format(ai.record_id, e))
# if not i % 100:
# db.commit()
# db.flush()
with db.session_ctx():
for repo in db.get_repositories():
if repo.name in ('JIRSandbox', 'REEFenite', 'Henry01184', 'FractionatedRes',
'PowerZPattern'):
continue
self.debug('Updating currents for {}'.format(repo.name))
try:
st = time.time()
tans = db.get_repository_analysis_count(repo.name)
ans = db.get_analyses_no_current(repo.name)
self.debug('Total repo analyses={}, filtered={}'.format(tans, len(ans)))
if not ans:
continue
# if not self.confirmation_dialog('Updated currents for {}'.format(repo.name)):
# if self.confirmation_dialog('Stop update'):
# break
# else:
# continue
for chunk in chunks(ans, 200):
chunk = self.make_analyses(chunk)
if chunk:
progress_iterator(chunk, func)
db.commit()
db.flush()
self.info('Elapsed time {}: n={}, '
                              '{:0.2f} min'.format(repo.name, len(ans), (time.time() - st) / 60.))
db.commit()
db.flush()
except BaseException as e:
self.warning('Failed making analyses for {}: {}'.format(repo.name, e))
db.commit_on_add = ocoa
db.close_session()
self.info('Generate currents finished')
def convert_uuid_runids(self, uuids):
with self.db.session_ctx():
ans = self.db.get_analyses_uuid(uuids)
return [an.record_id for an in ans]
# if uuid in self._uuid_runid_cache:
# r = self._uuid_runid_cache[uuid]
# else:
# with self.db.session_ctx():
# an = self.db.get_analysis_uuid(uuid)
# r = an.record_id
# self._uuid_runid_cache[uuid] = r
# return r
def find_associated_identifiers(self, samples):
from pychron.dvc.associated_identifiers import AssociatedIdentifiersView
av = AssociatedIdentifiersView()
for s in samples:
dbids = self.db.get_irradiation_position_by_sample(s.name, s.material, s.grainsize,
s.principal_investigator,
s.project)
av.add_items(dbids)
av.edit_traits(kind='modal')
def open_meta_repo(self):
mrepo = self.meta_repo
if self.meta_repo_name:
name = self.meta_repo_name
if self.meta_repo_dirname:
name = self.meta_repo_dirname
root = os.path.join(paths.dvc_dir, name)
self.debug('open meta repo {}'.format(root))
if os.path.isdir(os.path.join(root, '.git')):
self.debug('Opening Meta Repo')
mrepo.open_repo(root)
else:
url = self.make_url(self.meta_repo_name)
self.debug('cloning meta repo url={}'.format(url))
path = os.path.join(paths.dvc_dir, name)
self.meta_repo.clone(url, path)
return True
def synchronize(self, pull=True):
"""
        pull meta_repo changes; push local meta_repo changes when pull is False
:return:
"""
if pull:
self.meta_repo.pull()
else:
self.meta_repo.push()
def load_analysis_backend(self, ln, isotope_group):
db = self.db
with db.session_ctx():
ip = db.get_identifier(ln)
dblevel = ip.level
irrad = dblevel.irradiation.name
level = dblevel.name
pos = ip.position
fd = self.meta_repo.get_flux(irrad, level, pos)
_, prod = self.meta_repo.get_production(irrad, level, allow_null=True)
cs = self.meta_repo.get_chronology(irrad, allow_null=True)
x = datetime.now()
now = time.mktime(x.timetuple())
if fd['lambda_k']:
isotope_group.arar_constants.lambda_k = fd['lambda_k']
try:
pr = prod.to_dict(RATIO_KEYS)
except BaseException as e:
self.debug('invalid production. error={}'.format(e))
pr = {}
try:
ic = prod.to_dict(INTERFERENCE_KEYS)
except BaseException as e:
self.debug('invalid production. error={}'.format(e))
ic = {}
isotope_group.trait_set(j=fd['j'],
# lambda_k=lambda_k,
production_ratios=pr,
interference_corrections=ic,
chron_segments=cs.get_chron_segments(x),
irradiation_time=cs.irradiation_time,
timestamp=now)
return True
def analyses_db_sync(self, ln, ais, reponame):
self.info('sync db with analyses')
return self._sync_info(ln, ais, reponame)
def repository_db_sync(self, reponame, dry_run=False):
self.info('sync db with repo={} dry_run={}'.format(reponame, dry_run))
repo = self._get_repository(reponame, as_current=False)
db = self.db
repo.pull()
ps = []
with db.session_ctx():
ans = db.get_repository_analyses(reponame)
groups = [(g[0], list(g[1])) for g in groupby_key(ans, 'identifier')]
progress = open_progress(len(groups))
for ln, ais in groups:
progress.change_message('Syncing identifier: {}'.format(ln))
pss = self._sync_info(ln, ais, reponame, dry_run)
ps.extend(pss)
progress.close()
if ps and not dry_run:
# repo.pull()
repo.add_paths(ps)
repo.commit('<SYNC> Synced repository with database {}'.format(self.db.public_datasource_url))
repo.push()
self.info('finished db-repo sync for {}'.format(reponame))
def _sync_info(self, ln, ais, reponame, dry_run=False):
db = self.db
ip = db.get_identifier(ln)
dblevel = ip.level
irrad = dblevel.irradiation.name
level = dblevel.name
pos = ip.position
ps = []
for ai in ais:
p = analysis_path(ai, reponame)
if p and os.path.isfile(p):
try:
obj = dvc_load(p)
except ValueError:
self.warning('Skipping {}. invalid file'.format(p))
continue
else:
self.warning('Skipping {}. no file'.format(ai.record_id))
continue
sample = ip.sample.name
project = ip.sample.project.name
material = ip.sample.material.name
changed = False
for attr, v in (('sample', sample),
('project', project),
('material', material),
('irradiation', irrad),
('irradiation_level', level),
('irradiation_position', pos)):
ov = obj.get(attr)
if ov != v:
self.info('{:<20s} repo={} db={}'.format(attr, ov, v))
obj[attr] = v
changed = True
if changed:
self.debug('{}'.format(p))
ps.append(p)
if not dry_run:
dvc_dump(obj, p)
return ps
def repository_transfer(self, ans, dest):
destrepo = self._get_repository(dest, as_current=False)
for src, ais in groupby_repo(ans):
repo = self._get_repository(src, as_current=False)
for ai in ais:
ops, nps = self._transfer_analysis_to(dest, src, ai.runid)
repo.add_paths(ops)
destrepo.add_paths(nps)
# update database
dbai = self.db.get_analysis_uuid(ai.uuid)
for ri in dbai.repository_associations:
if ri.repository == src:
ri.repository = dest
# commit src changes
repo.commit('Transferred analyses to {}'.format(dest))
            destrepo.commit('Transferred analyses from {}'.format(src))
def get_flux(self, irrad, level, pos):
fd = self.meta_repo.get_flux(irrad, level, pos)
return fd['j']
def freeze_flux(self, ans):
self.info('freeze flux')
def ai_gen():
for irrad, ais in groupby_key(ans, 'irradiation'):
for level, ais in groupby_key(ais, 'level'):
p = self.get_level_path(irrad, level)
obj = dvc_load(p)
if isinstance(obj, list):
positions = obj
else:
positions = obj['positions']
for repo, ais in groupby_repo(ais):
yield repo, irrad, level, {ai.irradiation_position: positions[ai.irradiation_position] for ai in
ais}
added = []
def func(x, prog, i, n):
repo, irrad, level, d = x
if prog:
prog.change_message('Freezing Flux {}{} Repository={}'.format(irrad, level, repo))
root = repository_path(repo, 'flux', irrad)
r_mkdir(root)
p = os.path.join(root, level)
            if os.path.isfile(p):
                # merge the new frozen positions into any previously frozen ones
                dd = dvc_load(p)
                dd.update(d)
                d = dd
            dvc_dump(d, p)
added.append((repo, p))
progress_loader(ai_gen(), func, threshold=1)
self._commit_freeze(added, '<FLUX_FREEZE>')
def freeze_production_ratios(self, ans):
self.info('freeze production ratios')
def ai_gen():
for irrad, ais in groupby_key(ans, 'irradiation'):
for level, ais in groupby_key(ais, 'level'):
pr = self.meta_repo.get_production(irrad, level)
for ai in ais:
yield pr, ai
added = []
def func(x, prog, i, n):
pr, ai = x
if prog:
prog.change_message('Freezing Production {}'.format(ai.runid))
p = analysis_path(ai, ai.repository_identifier, 'productions', mode='w')
pr.dump(path=p)
added.append((ai.repository_identifier, p))
progress_loader(ai_gen(), func, threshold=1)
self._commit_freeze(added, '<PR_FREEZE>')
def manual_edit(self, runid, repository_identifier, values, errors, modifier):
self.debug('manual edit {} {} {}'.format(runid, repository_identifier, modifier))
self.debug('values {}'.format(values))
self.debug('errors {}'.format(errors))
path = analysis_path(runid, repository_identifier, modifier=modifier)
obj = dvc_load(path)
for k, v in values.items():
o = obj[k]
o['manual_value'] = v
o['use_manual_value'] = True
for k, v in errors.items():
o = obj[k]
o['manual_error'] = v
o['use_manual_error'] = True
dvc_dump(obj, path)
return path
def revert_manual_edits(self, analysis, repository_identifier):
ps = []
for mod in ('intercepts', 'blanks', 'baselines', 'icfactors'):
path = analysis_path(analysis, repository_identifier, modifier=mod)
with open(path, 'r') as rfile:
obj = json.load(rfile)
for item in obj.values():
if isinstance(item, dict):
item['use_manual_value'] = False
item['use_manual_error'] = False
ps.append(path)
dvc_dump(obj, path)
msg = '<MANUAL> reverted to non manually edited'
self.commit_manual_edits(repository_identifier, ps, msg)
def commit_manual_edits(self, repository_identifier, ps, msg):
if self.repository_add_paths(repository_identifier, ps):
self.repository_commit(repository_identifier, msg)
def status_view(self, repo):
repo = self._get_repository(repo, as_current=False)
v = StatusView(status=repo.status())
v.edit_traits()
def add_bookmark(self, repo, name, message=None, hexsha=None):
if not message:
message = 'No message provided'
repo = self._get_repository(repo, as_current=False)
repo.add_tag(name, message, hexsha)
def update_analysis_paths(self, items, msg):
"""
items is a list of (analysis, path) tuples
:param items:
:param msg:
:return:
"""
mod_repositories = []
def key(x):
return x[0].repository_identifier
for expid, ais in groupby(sorted(items, key=key), key=key):
ps = [p for _, p in ais]
if self.repository_add_paths(expid, ps):
self.repository_commit(expid, msg)
mod_repositories.append(expid)
return mod_repositories
def update_analyses(self, ans, modifiers, msg):
if not isinstance(modifiers, (list, tuple)):
modifiers = (modifiers,)
mod_repositories = []
for expid, ais in groupby_repo(ans):
ps = [analysis_path(x, x.repository_identifier, modifier=modifier) for x in ais for modifier in modifiers]
if self.repository_add_paths(expid, ps):
self.repository_commit(expid, msg)
mod_repositories.append(expid)
return mod_repositories
def update_tag(self, an, add=True, **kw):
tag = Tag.from_analysis(an, **kw)
tag.dump()
expid = an.repository_identifier
if add:
return self.repository_add_paths(expid, tag.path)
else:
return tag.path
def delete_existing_icfactors(self, ai, dets):
# remove all icfactors not in dets
if dets:
self.info('Delete existing icfactors for {}'.format(ai))
ai.delete_icfactors(dets)
if self._cache:
                self._cache.remove(ai.uuid)
self._update_current_age(ai)
def save_icfactors(self, ai, dets, fits, refs):
if fits and dets:
self.info('Saving icfactors for {}'.format(ai))
ai.dump_icfactors(dets, fits, refs, reviewed=True)
if self._cache:
                self._cache.remove(ai.uuid)
self._update_current_age(ai)
def save_blanks(self, ai, keys, refs):
if keys:
self.info('Saving blanks for {}'.format(ai))
ai.dump_blanks(keys, refs, reviewed=True)
if self._cache:
                self._cache.remove(ai.uuid)
self._update_current_blanks(ai, keys)
def save_defined_equilibration(self, ai, keys):
if keys:
self.info('Saving equilibration for {}'.format(ai))
if self._cache:
                self._cache.remove(ai.uuid)
self._update_current(ai, keys)
return ai.dump_equilibration(keys, reviewed=True)
def save_fits(self, ai, keys):
if keys:
self.info('Saving fits for {}'.format(ai))
ai.dump_fits(keys, reviewed=True)
if self._cache:
                self._cache.remove(ai.uuid)
self._update_current(ai, keys)
def save_flux(self, identifier, j, e):
"""
        called when a user manually edits the flux via the automated run factory
:param identifier:
:param j:
:param e:
:return:
"""
self.meta_pull()
with self.session_ctx(use_parent_session=False):
irp = self.get_identifier(identifier)
if irp:
level = irp.level
irradiation = level.irradiation
self._save_j(irradiation.name, level.name, irp.position, identifier,
j, e, 0, 0, 0, None, None, None, False)
self.meta_commit('User manual edited flux')
self.meta_push()
def save_flux_position(self, flux_position, options, decay_constants, add=False):
"""
save flux called from FluxPersistNode
:param flux_position:
:param options:
:param decay_constants:
:param add:
:return:
"""
irradiation = flux_position.irradiation
level = flux_position.level
pos = flux_position.hole_id
identifier = flux_position.identifier
j = flux_position.j
e = flux_position.jerr
mj = flux_position.mean_j
me = flux_position.mean_jerr
analyses = flux_position.analyses
position_jerr = flux_position.position_jerr
self._save_j(irradiation, level, pos, identifier, j, e, mj, me, position_jerr, decay_constants, analyses,
options, add)
def save_csv_dataset(self, name, repository, lines, local_path=False):
if local_path:
p = add_extension(local_path, '.csv')
else:
repo = self.get_repository(repository)
root = os.path.join(repo.path, 'csv')
p = os.path.join(root, add_extension(name, '.csv'))
if repo.smart_pull(quiet=False):
if not os.path.isdir(root):
os.mkdir(root)
else:
self.warning_dialog('Failed to update repository. Not saving CSV file "{}"'.format(p))
return
self.debug('writing dataset to {}'.format(p))
exists = os.path.isfile(p)
with open(p, 'w') as wfile:
wfile.writelines(lines)
if not local_path:
if repo.add_paths(p):
repo.commit('<CSV> {} dataset "{}"'.format('Modified' if exists else 'Added', name))
return p
def remove_irradiation_position(self, irradiation, level, hole):
db = self.db
dbpos = db.get_irradiation_position(irradiation, level, hole)
if dbpos:
db.delete(dbpos)
self.meta_repo.remove_irradiation_position(irradiation, level, hole)
def find_interpreted_ages(self, identifiers, repositories):
self.debug('find interpreted ages {}, {}'.format(identifiers, repositories))
ias = [InterpretedAgeRecordView(idn, path, dvc_load(path))
for idn in identifiers
for path in find_interpreted_age_path(idn, repositories)]
return ias
def find_flux_monitors(self, irradiation, levels, sample, make_records=True):
db = self.db
with db.session_ctx():
ans = db.get_flux_monitor_analyses(irradiation, levels, sample)
for a in ans:
a.bind()
if make_records:
ans = self.make_analyses(ans)
return ans
def find_references_by_load(self, load, atypes, make_records=True, **kw):
records = self.db.find_references_by_load(load, atypes, **kw)
if records:
for r in records:
r.bind()
if make_records:
records = self.make_analyses(records)
return records
def find_references(self, times, atypes, hours, exclude=None, make_records=True, **kw):
records = self.db.find_references(times, atypes, hours, exclude=exclude, **kw)
if records:
for r in records:
r.bind()
if make_records:
records = self.make_analyses(records)
return records
def make_interpreted_ages(self, ias):
self.debug('making interpreted ages {}'.format(ias))
if not isinstance(ias, (tuple, list)):
ias = (ias,)
def func(x, prog, i, n):
if prog:
prog.change_message('Making Interpreted age {}'.format(x.name))
obj = dvc_load(x.path)
            self.debug('loaded interpreted age file {}: {}'.format(x.path, obj))
ia = DVCInterpretedAge()
ia.repository_identifier = os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(x.path))))
ia.from_json(obj)
try:
ta = analysis_path(ia, ia.repository_identifier, modifier='tags')
if ta is not None:
ia.load_tag(dvc_load(ta))
except AnalysisNotAnvailableError:
pass
return ia
return progress_loader(ias, func, step=25)
def get_analysis(self, uuid):
an = self.db.get_analysis_uuid(uuid)
if an:
return self.make_analysis(an)
def make_analysis(self, record, *args, **kw):
a = self.make_analyses((record,), *args, **kw)
if a:
return a[0]
def make_analyses(self, records, calculate_f_only=False, reload=False, quick=False, use_progress=True):
if not records:
return []
globalv.active_analyses = records
# load repositories
st = time.time()
if self.use_cache:
cached_records = []
nrecords = []
cache = self._cache
# get items from the cache
for ri in records:
r = cache.get(ri.uuid)
if r is not None:
cached_records.append(r)
else:
nrecords.append(ri)
records = nrecords
def func(xi, prog, i, n):
if prog:
prog.change_message('Syncing repository= {}'.format(xi))
try:
self.sync_repo(xi, use_progress=False)
except BaseException:
pass
bad_records = [r for r in records if r.repository_identifier is None]
if bad_records:
            self.warning_dialog('Missing Repository Associations. Contact an expert! '
'Cannot load analyses "{}"'.format(','.join([r.record_id for r in
bad_records])))
records = [r for r in records if r.repository_identifier is not None]
if not records:
if self.use_cache:
cache.clean()
return cached_records
else:
return []
exps = {r.repository_identifier for r in records}
if use_progress:
progress_iterator(exps, func, threshold=1)
else:
for ei in exps:
self.sync_repo(ei, use_progress=False)
try:
branches = {ei: get_repository_branch(repository_path(ei)) for ei in exps}
except NoSuchPathError:
return []
fluxes = {}
productions = {}
chronos = {}
sens = {}
frozen_fluxes = {}
frozen_productions = {}
meta_repo = self.meta_repo
use_cocktail_irradiation = self.use_cocktail_irradiation
if not quick:
for exp in exps:
ps = get_frozen_productions(exp)
frozen_productions.update(ps)
for r in records:
irrad = r.irradiation
if irrad != 'NoIrradiation':
if irrad not in frozen_fluxes:
frozen_fluxes[irrad] = get_frozen_flux(r.repository_identifier, r.irradiation)
level = r.irradiation_level
if irrad in fluxes:
flux_levels = fluxes[irrad]
prod_levels = productions[irrad]
else:
flux_levels = {}
prod_levels = {}
if level not in flux_levels:
flux_levels[level] = meta_repo.get_flux_positions(irrad, level)
prod_levels[level] = meta_repo.get_production(irrad, level)
if irrad not in chronos:
chronos[irrad] = meta_repo.get_chronology(irrad)
if irrad not in fluxes:
fluxes[irrad] = flux_levels
productions[irrad] = prod_levels
if use_cocktail_irradiation and r.analysis_type == 'cocktail' and 'cocktail' not in chronos:
cirr = meta_repo.get_cocktail_irradiation()
chronos['cocktail'] = cirr.get('chronology')
fluxes['cocktail'] = cirr.get('flux')
sens = meta_repo.get_sensitivities()
def func(*args):
try:
return self._make_record(branches=branches, chronos=chronos, productions=productions,
fluxes=fluxes, calculate_f_only=calculate_f_only, sens=sens,
frozen_fluxes=frozen_fluxes, frozen_productions=frozen_productions,
quick=quick,
reload=reload, *args)
except BaseException:
record = args[0]
self.debug('make analysis exception: repo={}, record_id={}'.format(record.repository_identifier,
record.record_id))
self.debug_exception()
if use_progress:
ret = progress_loader(records, func, threshold=1, step=25)
else:
ret = [func(r, None, 0, 0) for r in records]
et = time.time() - st
n = len(ret)
if n:
self.debug('Make analysis time, total: {}, n: {}, average: {}'.format(et, n, et / float(n)))
if self.use_cache:
cache.clean()
ret = cached_records + ret
return ret
# repositories
def find_changes(self, names, remote, branch):
gs = self.application.get_services(IGitHost)
for gi in gs:
gi.new_session()
def func(item, prog, i, n):
name = item.name
if prog:
prog.change_message('Examining: {}({}/{})'.format(name, i, n))
self.debug('examining {}'.format(name))
r = Repo(repository_path(name))
lc = r.commit(branch).hexsha
for gi in gs:
outdated, sha = gi.up_to_date(self.organization, name, lc, branch)
if outdated:
try:
fsha = r.commit('FETCH_HEAD').hexsha
except BaseException:
fsha = None
try:
if fsha != sha:
self.debug('fetching {}'.format(name))
r.git.fetch()
item.dirty = True
item.update(fetch=False)
except GitCommandError as e:
self.warning('error examining {}. {}'.format(name, e))
else:
item.update(fetch=False)
progress_loader(names, func, threshold=1)
for gi in gs:
gi.close_session()
def repository_add_paths(self, repository_identifier, paths):
repo = self._get_repository(repository_identifier)
return repo.add_paths(paths)
def repository_commit(self, repository, msg):
self.debug('Repository commit: {} msg: {}'.format(repository, msg))
repo = self._get_repository(repository)
repo.commit(msg)
def remote_repositories(self):
rs = []
gs = self.application.get_services(IGitHost)
if gs:
for gi in gs:
ri = gi.get_repos(self.organization)
rs.extend(ri)
else:
self.warning_dialog(HOST_WARNING_MESSAGE)
return rs
def remote_repository_names(self):
rs = []
gs = self.application.get_services(IGitHost)
if gs:
for gi in gs:
self.debug('load repositories from {}'.format(self.organization))
ri = gi.get_repository_names(self.organization)
rs.extend(ri)
else:
self.warning_dialog(HOST_WARNING_MESSAGE)
return rs
def check_githost_connection(self):
git_service = self.application.get_service(IGitHost)
return git_service.test_connection(self.organization)
def make_url(self, name, **kw):
git_service = self.application.get_service(IGitHost)
return git_service.make_url(name, self.organization, **kw)
def git_session_ctx(self, repository_identifier, message):
return GitSessionCTX(self, repository_identifier, message)
def sync_repo(self, name, use_progress=True):
"""
        pull or clone a repo
"""
root = repository_path(name)
exists = os.path.isdir(os.path.join(root, '.git'))
self.debug('sync repository {}. exists={}'.format(name, exists))
if exists:
repo = self._get_repository(name)
repo.pull(use_progress=use_progress, use_auto_pull=self.use_auto_pull)
return True
else:
self.debug('getting repository from remote')
service = self.application.get_service(IGitHost)
if not service:
return True
else:
names = self.remote_repository_names()
if name in names:
service.clone_from(name, root, self.organization)
return True
else:
if isinstance(service, LocalGitHostService):
service.create_empty_repo(name)
return True
else:
self.warning_dialog('name={} not in available repos '
'from service={}, organization={}'.format(name,
service.remote_url,
self.organization))
for ni in names:
self.debug('available repo== {}'.format(ni))
def rollback_repository(self, expid):
repo = self._get_repository(expid)
cpaths = repo.get_local_changes()
        # convert changed paths to a list of analyses
# select paths to revert
rpaths = ('.',)
repo.cmd('checkout', '--', ' '.join(rpaths))
for p in rpaths:
self.debug('revert changes for {}'.format(p))
head = repo.get_head(hexsha=False)
msg = 'Changes to {} reverted to Commit: {}\n' \
'Date: {}\n' \
'Message: {}'.format(expid, head.hexsha[:10],
format_date(head.committed_date),
head.message)
self.information_dialog(msg)
def pull_repository(self, repo):
repo = self._get_repository(repo)
self.debug('pull repository {}'.format(repo))
for gi in self.application.get_services(IGitHost):
self.debug('pull to remote={}, url={}'.format(gi.default_remote_name, gi.remote_url))
repo.smart_pull(remote=gi.default_remote_name)
def push_repository(self, repo, **kw):
repo = self._get_repository(repo)
self.debug('push repository {}'.format(repo))
for gi in self.application.get_services(IGitHost):
self.debug('pushing to remote={}, url={}'.format(gi.default_remote_name, gi.remote_url))
repo.push(remote=gi.default_remote_name, **kw)
def push_repositories(self, changes):
for gi in self.application.get_services(IGitHost):
push_repositories(changes, gi, quiet=False)
def delete_local_commits(self, repo, **kw):
r = self._get_repository(repo)
r.delete_local_commits(**kw)
# IDatastore
def get_greatest_aliquot(self, identifier):
return self.db.get_greatest_aliquot(identifier)
def get_greatest_step(self, identifier, aliquot):
return self.db.get_greatest_step(identifier, aliquot)
def is_connected(self):
return self.db.connected
def connect(self, *args, **kw):
return self.db.connect(*args, **kw)
# meta repo
def update_flux(self, *args, **kw):
self.meta_repo.update_flux(*args, **kw)
def set_identifier(self, irradiation, level, position, identifier):
dbpos = self.db.get_irradiation_position(irradiation, level, position)
if dbpos:
dbpos.identifier = identifier
self.db.commit()
self.meta_repo.set_identifier(irradiation, level, position, identifier)
def add_production_to_irradiation(self, irrad, reactor, params, msg=None):
self.meta_repo.add_production_to_irradiation(irrad, reactor, params)
if msg is None:
msg = 'updated default production. {}'.format(reactor)
self.meta_commit(msg)
def update_chronology(self, name, doses):
self.meta_repo.update_chronology(name, doses)
self.meta_commit('updated chronology for {}'.format(name))
def meta_pull(self, **kw):
return self.meta_repo.smart_pull(**kw)
def meta_push(self):
self.meta_repo.push()
def meta_add_all(self):
self.meta_repo.add_unstaged(paths.meta_root, add_all=True)
def meta_commit(self, msg):
changes = self.meta_repo.has_staged()
if changes:
self.debug('meta repo has changes: {}'.format(changes))
self.meta_repo.report_local_changes()
self.meta_repo.commit(msg)
self.meta_repo.clear_cache = True
else:
self.debug('no changes to meta repo')
def add_production(self, irrad, name, prod):
self.meta_repo.add_production_to_irradiation(irrad, name, prod)
def get_production(self, irrad, name):
return self.meta_repo.get_production(irrad, name)
# get
def get_csv_datasets(self, repo):
repo = self.get_repository(repo)
return list_directory(os.path.join(repo.path, 'csv'), extension='.csv', remove_extension=True)
def get_local_repositories(self):
return list_subdirectories(paths.repository_dataset_dir)
def get_repository(self, exp):
return self._get_repository(exp)
def get_meta_head(self):
return self.meta_repo.get_head()
def get_irradiation_geometry(self, irrad, level):
dblevel = self.db.get_irradiation_level(irrad, level)
return self.meta_repo.get_irradiation_holder_holes(dblevel.holder), dblevel.holder
def get_irradiation_names(self):
irrads = self.db.get_irradiations()
return [i.name for i in irrads]
def get_irradiations(self, *args, **kw):
sort_name_key = self.irradiation_prefix
return self.db.get_irradiations(sort_name_key=sort_name_key, *args, **kw)
# add
def add_interpreted_ages(self, rid, iass):
ps = []
ialabels = []
for ia in iass:
d = make_interpreted_age_dict(ia)
rid, p = self._add_interpreted_age(ia, d)
ps.append(p)
ialabels.append('{} {} {}'.format(ia.name, ia.identifier, ia.sample))
if self.repository_add_paths(rid, ps):
sparrow = self.application.get_service('pychron.sparrow.sparrow.Sparrow')
if sparrow:
if sparrow.connect():
for p in ps:
sparrow.insert_ia(p)
else:
self.warning('Connection failed. Cannot add IAs to Sparrow')
self.repository_commit(rid, '<IA> added interpreted ages {}'.format(','.join(ialabels)))
return True
def add_interpreted_age(self, ia):
d = make_interpreted_age_dict(ia)
rid, p = self._add_interpreted_age(ia, d)
if self.repository_add_paths(rid, p):
self.repository_commit(rid, '<IA> added interpreted age '
'{} identifier={} sample={}'.format(ia.name, ia.identifier, ia.sample))
def add_repository_association(self, expid, runspec):
db = self.db
dban = db.get_analysis_uuid(runspec.uuid)
if dban:
for e in dban.repository_associations:
if e.repository == expid:
break
else:
db.add_repository_association(expid, dban)
src_expid = runspec.repository_identifier
if src_expid != expid:
repo = self._get_repository(expid)
for m in PATH_MODIFIERS:
src = analysis_path(runspec, src_expid, modifier=m)
dest = analysis_path(runspec, expid, modifier=m, mode='w')
shutil.copyfile(src, dest)
repo.add(dest, commit=False)
repo.commit('added repository association')
else:
self.warning('{} not in the database {}'.format(runspec.runid, self.db.name))
def add_material(self, name, grainsize=None):
db = self.db
added = False
if not db.get_material(name, grainsize):
added = True
db.add_material(name, grainsize)
return added
def add_project(self, name, principal_investigator=None, **kw):
added = False
db = self.db
if not db.get_project(name, principal_investigator):
added = True
db.add_project(name, principal_investigator, **kw)
return added
def add_sample(self, name, project, pi, material, grainsize=None, note=None, **kw):
added = False
db = self.db
if not db.get_sample(name, project, pi, material, grainsize):
added = True
db.add_sample(name, project, pi, material, grainsize, note=note, **kw)
return added
def add_principal_investigator(self, name, **kw):
added = False
db = self.db
if not db.get_principal_investigator(name):
db.add_principal_investigator(name, **kw)
added = True
return added
def add_irradiation_position(self, irrad, level, pos, identifier=None, **kw):
db = self.db
added = False
if not db.get_irradiation_position(irrad, level, pos):
db.add_irradiation_position(irrad, level, pos, identifier, **kw)
self.meta_repo.add_position(irrad, level, pos)
added = True
return added
def add_irradiation_level(self, name, irradiation, holder, production_name, **kw):
added = False
dblevel = self.get_irradiation_level(irradiation, name)
if dblevel is None:
added = True
self.db.add_irradiation_level(name, irradiation, holder, production_name, **kw)
self.meta_repo.add_level(irradiation, name)
self.meta_repo.update_level_production(irradiation, name, production_name)
return added
def clone_repository(self, identifier):
root = repository_path(identifier)
if not os.path.isdir(root):
self.debug('cloning {}'.format(root))
url = self.make_url(identifier)
Repo.clone_from(url, root)
else:
self.debug('{} already exists'.format(identifier))
def check_remote_repository_exists(self, name):
gs = self.application.get_services(IGitHost)
for gi in gs:
if gi.remote_exists(self.organization, name):
return True
def add_repository(self, identifier, principal_investigator, inform=True):
self.debug('trying to add repository identifier={}, pi={}'.format(identifier, principal_investigator))
root = repository_path(identifier)
if os.path.isdir(root):
self.db.add_repository(identifier, principal_investigator)
self.debug('already a directory {}'.format(identifier))
if inform:
self.warning_dialog('{} already exists.'.format(root))
return True
names = self.remote_repository_names()
if identifier in names:
# make sure also in the database
self.db.add_repository(identifier, principal_investigator)
if inform:
self.warning_dialog('Repository "{}" already exists'.format(identifier))
return True
else:
if os.path.isdir(root):
self.db.add_repository(identifier, principal_investigator)
if inform:
self.warning_dialog('{} already exists.'.format(root))
else:
self.db.add_repository(identifier, principal_investigator)
ret = True
gs = self.application.get_services(IGitHost)
if gs:
ret = False
for i, gi in enumerate(gs):
self.info('Creating repository at {}. {}'.format(gi.name, identifier))
if gi.create_repo(identifier, organization=self.organization):
ret = True
if isinstance(gi, LocalGitHostService):
if i == 0:
self.db.add_repository(identifier, principal_investigator)
else:
if self.default_team:
gi.set_team(self.default_team, self.organization, identifier,
permission='push')
url = gi.make_url(identifier, self.organization)
if i == 0:
try:
repo = Repo.clone_from(url, root)
except BaseException as e:
self.debug('failed cloning repo. {}'.format(e))
ret = False
self.db.add_repository(identifier, principal_investigator)
else:
repo.create_remote(gi.default_remote_name or 'origin', url)
return ret
def add_irradiation(self, name, doses=None, verbose=True):
if self.db.get_irradiation(name):
if verbose:
self.warning('irradiation {} already exists'.format(name))
return
self.db.add_irradiation(name)
self.meta_repo.add_irradiation(name)
self.meta_repo.add_chronology(name, doses)
root = os.path.join(paths.meta_root, name)
p = os.path.join(root, 'productions')
if not os.path.isdir(p):
os.mkdir(p)
p = os.path.join(root, 'productions.json')
with open(p, 'w') as wfile:
json.dump({}, wfile)
self.meta_repo.add(p, commit=False)
return True
def add_load_holder(self, name, path_or_txt):
self.db.add_load_holder(name)
self.meta_repo.add_load_holder(name, path_or_txt)
def copy_production(self, pr):
"""
@param pr: irrad_ProductionTable object
@return:
"""
pname = pr.name.replace(' ', '_')
path = os.path.join(paths.meta_root, 'productions', '{}.json'.format(pname))
if not os.path.isfile(path):
obj = {}
for attr in INTERFERENCE_KEYS + RATIO_KEYS:
obj[attr] = [getattr(pr, attr), getattr(pr, '{}_err'.format(attr))]
dvc_dump(obj, path)
# def save_tag_subgroup_items(self, items):
#
# for expid, ans in groupby_repo(items):
# self.sync_repo(expid)
# ps = []
# for it in ans:
# tag = Tag.from_analysis(it)
# tag.dump()
#
# ps.append(tag.path)
#
# if self.repository_add_paths(expid, ps):
# self._commit_tags(ans, expid, '<SUBGROUP>', refresh=False)
def tag_items(self, tag, items, note=''):
self.debug('tag items with "{}"'.format(tag))
with self.db.session_ctx() as sess:
for expid, ans in groupby_repo(items):
self.sync_repo(expid)
cs = []
ps = []
for it in ans:
if not isinstance(it, (InterpretedAge, DVCAnalysis)):
oit = self.make_analysis(it, quick=True)
if oit is None:
self.warning('Failed preparing analysis. Cannot tag: {}'.format(it))
it = oit
if it:
self.debug('setting {} tag= {}'.format(it.record_id, tag))
if not isinstance(it, InterpretedAge):
self.set_analysis_tag(it, tag)
it.set_tag({'name': tag, 'note': note or ''})
path = self.update_tag(it, add=False)
ps.append(path)
cs.append(it)
sess.commit()
if ps:
if self.repository_add_paths(expid, ps):
self._commit_tags(cs, expid, '<TAG> {:<6s}'.format(tag))
def get_repository(self, repo):
return self._get_repository(repo, as_current=False)
def clear_cache(self):
if self.use_cache:
self._cache.clear()
# private
def _update_current_blanks(self, ai, keys=None, dban=None, force=False, update_age=True, commit=True):
if self.update_currents_enabled:
db = self.db
if dban is None:
dban = db.get_analysis_uuid(ai.uuid)
if keys is None:
keys = ai.isotope_keys
if dban:
for k in keys:
iso = ai.get_isotope(k)
if iso:
iso = iso.blank
db.update_current(dban, '{}_blank'.format(k), iso.value, iso.error, iso.units, force=force)
if update_age:
self._update_current_age(ai, dban, force=force)
if commit:
db.commit()
else:
self.warning('Failed to update current values. '
                             'Could not locate RunID={}, UUID={}'.format(ai.runid, ai.uuid))
def _update_current_age(self, ai, dban=None, force=False):
if self.update_currents_enabled:
if dban is None:
db = self.db
dban = db.get_analysis_uuid(ai.uuid)
if dban:
age_units = ai.arar_constants.age_units
self.db.update_current(dban, 'age', ai.age, ai.age_err, age_units, force=force)
self.db.update_current(dban, 'age_wo_j_error', ai.age, ai.age_err_wo_j, age_units, force=force)
def _update_current(self, ai, keys=None, dban=None, force=False, update_age=True, commit=True):
if self.update_currents_enabled:
db = self.db
if dban is None:
dban = db.get_analysis_uuid(ai.uuid)
if dban:
if keys is None:
keys = ai.isotope_keys
keys += [iso.detector for iso in ai.iter_isotopes()]
for k in keys:
iso = ai.get_isotope(k)
if iso is None:
iso = ai.get_isotope(detector=k)
bs = iso.baseline
db.update_current(dban, '{}_baseline'.format(k), bs.value, bs.error, bs.units, force=force)
db.update_current(dban, '{}_baseline_n'.format(k), bs.n, None, 'int', force=force)
else:
db.update_current(dban, '{}_n'.format(k), iso.n, None, 'int', force=force)
db.update_current(dban, '{}_intercept'.format(k), iso.value, iso.error, iso.units, force=force)
v = iso.get_ic_corrected_value()
db.update_current(dban, '{}_ic_corrected'.format(k), nominal_value(v), std_dev(v), iso.units,
force=force)
v = iso.get_baseline_corrected_value()
db.update_current(dban, '{}_bs_corrected'.format(k), nominal_value(v), std_dev(v), iso.units,
force=force)
v = iso.get_non_detector_corrected_value()
db.update_current(dban, k, nominal_value(v), std_dev(v), iso.units, force=force)
if update_age:
self._update_current_age(ai, dban, force=force)
if commit:
db.commit()
else:
self.warning('Failed to update current values. '
                         'Could not locate RunID={}, UUID={}'.format(ai.runid, ai.uuid))
def _transfer_analysis_to(self, dest, src, rid):
p = analysis_path(rid, src)
np = analysis_path(rid, dest)
obj = dvc_load(p)
obj['repository_identifier'] = dest
dvc_dump(obj, p)
ops = [p]
nps = [np]
shutil.move(p, np)
for modifier in PATH_MODIFIERS:
if modifier:
p = analysis_path(rid, src, modifier=modifier)
np = analysis_path(rid, dest, modifier=modifier)
shutil.move(p, np)
ops.append(p)
nps.append(np)
return ops, nps
def _commit_freeze(self, added, msg):
for repo, ps in groupby_key(added, key=itemgetter(0)):
rm = GitRepoManager()
rm.open_repo(repo, paths.repository_dataset_dir)
rm.add_paths(ps)
rm.smart_pull()
rm.commit(msg)
def _commit_tags(self, cs, expid, msg, refresh=True):
if cs:
cc = [c.record_id for c in cs]
if len(cc) > 1:
cstr = '{} - {}'.format(cc[0], cc[-1])
else:
cstr = cc[0]
self.repository_commit(expid, '{} {}'.format(msg, cstr))
if refresh:
for ci in cs:
ci.refresh_view()
def _save_j(self, irradiation, level, pos, identifier, j, e, mj, me, position_jerr, decay_constants, analyses,
options, add):
self.info('Saving j for {}{}:{} {}, j={} +/-{}'.format(irradiation, level,
pos, identifier, j, e))
self.meta_repo.update_flux(irradiation, level, pos, identifier, j, e, mj, me,
decay=decay_constants,
analyses=analyses,
options=options, add=add,
position_jerr=position_jerr)
        if self.update_currents_enabled:
ans = self.db.get_labnumber_analyses([identifier])
for ai in self.make_analyses(ans):
self._update_current_age(ai)
def _add_interpreted_age(self, ia, d):
rid = ia.repository_identifier
ia_path_name = ia.identifier.replace(':', '_')
i = 0
while 1:
p = analysis_path('{}_{:05d}'.format(ia_path_name, i), rid, modifier='ia', mode='w')
i += 1
if not os.path.isfile(p):
break
self.debug('saving interpreted age. {}'.format(p))
dvc_dump(d, p)
return rid, p
def _load_repository(self, expid, prog, i, n):
if prog:
prog.change_message('Loading repository {}. {}/{}'.format(expid, i, n))
self.sync_repo(expid)
def _make_record(self, record, prog, i, n, productions=None, chronos=None, branches=None, fluxes=None, sens=None,
frozen_fluxes=None, frozen_productions=None,
calculate_f_only=False, reload=False, quick=False):
meta_repo = self.meta_repo
if prog:
# this accounts for ~85% of the time!!!
prog.change_message('Loading analysis {}. {}/{}'.format(record.record_id, i, n))
expid = record.repository_identifier
if not expid:
exps = record.repository_ids
            self.debug('Analysis {} is associated with multiple repositories '
'{}'.format(record.record_id, ','.join(exps)))
expid = None
if self.selected_repositories:
rr = [si for si in self.selected_repositories if si in exps]
if rr:
if len(rr) > 1:
expid = self._get_requested_experiment_id(rr)
else:
expid = rr[0]
if expid is None:
expid = self._get_requested_experiment_id(exps)
if isinstance(record, DVCAnalysis) and not reload:
a = record
else:
# self.debug('use_repo_suffix={} record_id={}'.format(record.use_repository_suffix, record.record_id))
rid = record.record_id
uuid = record.uuid
try:
a = DVCAnalysis(uuid, rid, expid)
except AnalysisNotAnvailableError:
try:
a = DVCAnalysis(uuid, rid, expid)
except AnalysisNotAnvailableError:
self.warning_dialog('Analysis {} not in repository {}. '
                                        'You may need to pull changes'.format(rid, expid))
return
a.group_id = record.group_id
if not quick:
a.load_name = record.load_name
a.load_holder = record.load_holder
# get repository branch
a.branch = branches.get(expid, '')
# load irradiation
if sens:
sens = sens.get(a.mass_spectrometer.lower(), [])
a.set_sensitivity(sens)
if a.analysis_type == 'cocktail' and 'cocktail' in chronos:
a.set_chronology(chronos['cocktail'])
a.j = fluxes['cocktail']
elif a.irradiation: # and a.irradiation not in ('NoIrradiation',):
if chronos:
chronology = chronos.get(a.irradiation, None)
else:
chronology = meta_repo.get_chronology(a.irradiation)
if chronology:
a.set_chronology(chronology)
pname, prod = None, None
if frozen_productions:
try:
prod = frozen_productions['{}.{}'.format(a.irradiation, a.irradiation_level)]
pname = prod.name
except KeyError:
pass
if not prod:
if a.irradiation != 'NoIrradiation':
try:
pname, prod = productions[a.irradiation][a.irradiation_level]
except KeyError:
pname, prod = meta_repo.get_production(a.irradiation, a.irradiation_level)
self.warning('production key error name={} '
'irrad={}, level={}, productions={}'.format(pname,
a.irradiation,
a.irradiation_level,
productions))
if prod is not None:
a.set_production(pname, prod)
fd = None
if frozen_fluxes:
try:
fd = frozen_fluxes[a.irradiation][a.identifier]
except KeyError:
pass
if not fd:
if fluxes:
try:
level_flux = fluxes[a.irradiation][a.irradiation_level]
fd = meta_repo.get_flux_from_positions(a.irradiation_position, level_flux)
except KeyError:
fd = {'j': ufloat(0, 0)}
else:
fd = meta_repo.get_flux(a.irradiation,
a.irradiation_level,
                                                a.irradiation_position)
a.j = fd.get('j', ufloat(0, 0))
a.position_jerr = fd.get('position_jerr', 0)
j_options = fd.get('options')
if j_options:
a.model_j_kind = fd.get('model_kind')
lk = fd.get('lambda_k')
if lk:
a.arar_constants.lambda_k = lk
for attr in ('age', 'name', 'material', 'reference'):
skey = 'monitor_{}'.format(attr)
try:
setattr(a, skey, fd[skey])
except KeyError as e:
try:
key = 'standard_{}'.format(attr)
setattr(a, skey, fd[key])
except KeyError:
pass
if calculate_f_only:
a.calculate_f()
else:
a.calculate_age()
if self._cache:
self._cache.update(record.uuid, a)
return a
def _get_repository(self, repository_identifier, as_current=True):
if isinstance(repository_identifier, GitRepoManager):
repo = repository_identifier
else:
repo = None
if as_current:
repo = self.current_repository
path = repository_path(repository_identifier)
if repo is None or repo.path != path:
self.debug('make new repomanager for {}'.format(path))
repo = GitRepoManager()
repo.path = path
repo.open_repo(path)
if as_current:
self.current_repository = repo
return repo
def _bind_preferences(self):
prefid = 'pychron.dvc.connection'
bind_preference(self, 'favorites', '{}.favorites'.format(prefid))
self._favorites_changed(self.favorites)
self._set_meta_repo_name()
prefid = 'pychron.dvc'
bind_preference(self, 'use_cocktail_irradiation', '{}.use_cocktail_irradiation'.format(prefid))
bind_preference(self, 'use_cache', '{}.use_cache'.format(prefid))
bind_preference(self, 'max_cache_size', '{}.max_cache_size'.format(prefid))
bind_preference(self, 'update_currents_enabled', '{}.update_currents_enabled'.format(prefid))
bind_preference(self, 'use_auto_pull', '{}.use_auto_pull'.format(prefid))
prefid = 'pychron.entry'
bind_preference(self, 'irradiation_prefix', '{}.irradiation_prefix'.format(prefid))
if self.use_cache:
self._use_cache_changed()
def _max_cache_size_changed(self, new):
if new:
if self._cache:
self._cache.max_size = self.max_cache_size
else:
self._use_cache_changed()
else:
self.use_cache = False
def _use_cache_changed(self):
if self.use_cache:
self._cache = DVCCache(max_size=self.max_cache_size)
else:
self._cache = None
def _favorites_changed(self, items):
try:
ds = [DVCConnectionItem(attrs=f, load_names=False) for f in items]
self.data_sources = [d for d in ds if d.enabled]
except BaseException:
pass
if self.data_sources:
self.data_source = next((d for d in self.data_sources if d.default and d.enabled), None)
def _data_source_changed(self, old, new):
self.debug('data source changed. {}, db={}'.format(new, id(self.db)))
if new is not None:
for attr in ('username', 'password', 'host', 'kind', 'path', 'timeout'):
setattr(self.db, attr, getattr(new, attr))
self.db.name = new.dbname
self.organization = new.organization
self.meta_repo_name = new.meta_repo_name
self.meta_repo_dirname = new.meta_repo_dir
self.db.reset_connection()
if old:
self.db.connect()
self.db.create_session()
def _meta_repo_dirname_changed(self):
self._set_meta_repo_name()
def _meta_repo_name_changed(self):
self._set_meta_repo_name()
def _set_meta_repo_name(self):
name = self.meta_repo_name
if self.meta_repo_dirname:
name = self.meta_repo_dirname
paths.meta_root = os.path.join(paths.dvc_dir, name)
def _defaults(self):
self.debug('writing defaults')
self.db.add_save_user()
for tag, func in (('irradiation holders', self._add_default_irradiation_holders),
('productions', self._add_default_irradiation_productions),
('load holders', self._add_default_load_holders)):
d = os.path.join(self.meta_repo.path, tag.replace(' ', '_'))
if not os.path.isdir(d):
os.mkdir(d)
if self.auto_add:
func()
elif self.confirmation_dialog('You have no {}. Would you like to add some defaults?'.format(tag)):
func()
def _add_default_irradiation_productions(self):
ds = (('TRIGA.txt', TRIGA),)
self._add_defaults(ds, 'productions')
def _add_default_irradiation_holders(self):
ds = (('24Spokes.txt', HOLDER_24_SPOKES),)
        self._add_defaults(ds, 'irradiation_holders')
def _add_default_load_holders(self):
ds = (('221.txt', LASER221),
('65.txt', LASER65))
self._add_defaults(ds, 'load_holders', self.db.add_load_holder)
def _add_defaults(self, defaults, root, dbfunc=None):
commit = False
repo = self.meta_repo
for name, txt in defaults:
p = os.path.join(repo.path, root, name)
if not os.path.isfile(p):
with open(p, 'w') as wfile:
wfile.write(txt)
repo.add(p, commit=False)
commit = True
if dbfunc:
name = remove_extension(name)
dbfunc(name)
if commit:
repo.commit('added default {}'.format(root.replace('_', ' ')))
def __getattr__(self, item):
try:
return getattr(self.db, item)
except AttributeError:
try:
return getattr(self.meta_repo, item)
except AttributeError as e:
print(e, item)
# raise DVCException(item)
# defaults
def _db_default(self):
return DVCDatabase(kind='mysql',
username='root',
password='Argon',
host='localhost',
name='pychronmeta')
def _meta_repo_default(self):
return MetaRepo(application=self.application)
if __name__ == '__main__':
paths.build('_dev')
idn = '24138'
exps = ['Irradiation-NM-272']
print(find_interpreted_age_path(idn, exps))
# d = DVC(bind=False)
# with open('/Users/ross/Programming/githubauth.txt') as rfile:
# usr = rfile.readline().strip()
# pwd = rfile.readline().strip()
# d.github_user = usr
# d.github_password = pwd
# d.organization = 'NMGRLData'
# d.add_experiment('Irradiation-NM-273')
# ============= EOF =============================================
| {
"content_hash": "af5b37f9414bd44614692fde0b92a8fe",
"timestamp": "",
"source": "github",
"line_count": 1914,
"max_line_length": 120,
"avg_line_length": 37.47544409613375,
"alnum_prop": 0.5213305821994201,
"repo_name": "UManPychron/pychron",
"id": "8c7b1b616b2811e9a9931cf1739dd56b94b76b00",
"size": "72462",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pychron/dvc/dvc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "279"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "40346"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10234954"
},
{
"name": "Shell",
"bytes": "10753"
}
],
"symlink_target": ""
} |
import argparse
import numpy as np
from eval_proposal import ANETproposal
def main(ground_truth_filename, proposal_filename, max_avg_nr_proposals=100,
tiou_thresholds=np.linspace(0.5, 0.95, 10),
subset='validation', verbose=True, check_status=True):
anet_proposal = ANETproposal(ground_truth_filename, proposal_filename,
tiou_thresholds=tiou_thresholds,
max_avg_nr_proposals=max_avg_nr_proposals,
                                 subset=subset, verbose=verbose, check_status=check_status)
anet_proposal.evaluate()
def parse_input():
description = ('This script allows you to evaluate the ActivityNet '
'proposal task which is intended to evaluate the ability '
'of algorithms to generate activity proposals that temporally '
'localize activities in untrimmed video sequences.')
p = argparse.ArgumentParser(description=description)
p.add_argument('ground_truth_filename',
help='Full path to json file containing the ground truth.')
p.add_argument('proposal_filename',
help='Full path to json file containing the proposals.')
p.add_argument('--subset', default='validation',
help=('String indicating subset to evaluate: '
'(training, validation)'))
p.add_argument('--verbose', type=bool, default=True)
p.add_argument('--check_status', type=bool, default=True)
return p.parse_args()
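# Example invocation (file names are placeholders):
#   python get_proposal_performance.py ground_truth.json proposals.json --subset validation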
if __name__ == '__main__':
args = parse_input()
main(**vars(args))
| {
"content_hash": "870bd40fdf53f11850954834f54e0c25",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 82,
"avg_line_length": 46.05714285714286,
"alnum_prop": 0.6215880893300249,
"repo_name": "activitynet/ActivityNet",
"id": "48c730703d95cc5115b47b83dff84116d5eef21a",
"size": "1612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Evaluation/get_proposal_performance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "22855212"
},
{
"name": "Python",
"bytes": "202438"
},
{
"name": "Shell",
"bytes": "374"
}
],
"symlink_target": ""
} |
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import traceback
import dateparser
import requests
from IAMApiModule import *  # noqa: F401
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
'''CONSTANTS'''
BATCH_SIZE = 2000
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
DEPROVISIONED_STATUS = 'DEPROVISIONED'
USER_IS_DISABLED_MSG = 'User is already disabled.'
USER_IS_DISABLED_ERROR = 'E0000007'
ERROR_CODES_TO_SKIP = [
'E0000016', # user is already enabled
USER_IS_DISABLED_ERROR
]
ERROR_CODES_TO_RETURN_ERROR = [
'E0000047', # rate limit - resets after 1 minute
]
FETCH_QUERY_EXCEPTION_MSG = 'If you marked the "Query only application events configured in IAM Configuration" ' \
'checkbox in the instance configuration, you must add at least one application in ' \
'the IAM Configuration incident before fetching logs from Okta. ' \
'Alternatively, you can unmark this checkbox and provide a ' \
'"Fetch Query Filter" parameter instead.'
GET_USER_ATTRIBUTES = ['id', 'login', 'email']
'''CLIENT CLASS'''
class Client(BaseClient):
"""
Okta IAM Client class that implements logic to authenticate with Okta.
"""
def test_connection(self):
uri = 'users/me'
self._http_request(method='GET', url_suffix=uri)
def get_user(self, filter_name: str, filter_value: str):
filter_name = filter_name if filter_name == 'id' else f'profile.{filter_name}'
uri = 'users'
query_params = {
'filter': f'{filter_name} eq "{filter_value}"'
}
res = self._http_request(
method='GET',
url_suffix=uri,
params=query_params
)
if res and len(res) == 1:
return res[0]
return None
def deactivate_user(self, user_id):
uri = f'users/{user_id}/lifecycle/deactivate'
self._http_request(
method="POST",
url_suffix=uri
)
def activate_user(self, user_id):
query_params = {'sendEmail': 'false'}
uri = f'users/{user_id}/lifecycle/activate'
self._http_request(
method="POST",
url_suffix=uri,
params=query_params
)
def create_user(self, user_data):
# create a user in staged mode (not active)
body = {
'profile': user_data
}
uri = 'users'
query_params = {
'activate': 'false',
'provider': 'true'
}
res = self._http_request(
method='POST',
url_suffix=uri,
json_data=body,
params=query_params
)
return res
def update_user(self, user_id, user_data):
body = {
'profile': user_data
}
uri = f'users/{user_id}'
res = self._http_request(
method='POST',
url_suffix=uri,
json_data=body
)
return res
def get_okta_fields(self):
okta_fields = {}
uri = 'meta/schemas/user/default'
res = self._http_request(
method='GET',
url_suffix=uri
)
base_properties = res.get('definitions', {}).get('base', {}).get('properties', {})
okta_fields.update({k: base_properties[k].get('title') for k in base_properties.keys()})
custom_properties = res.get('definitions', {}).get('custom', {}).get('properties', {})
okta_fields.update({k: custom_properties[k].get('title') for k in custom_properties.keys()})
return okta_fields
def http_request(self, method, url_suffix, full_url=None, params=None, data=None, headers=None):
if headers is None:
headers = self._headers
full_url = full_url if full_url else urljoin(self._base_url, url_suffix)
res = requests.request(
method,
full_url,
verify=self._verify,
headers=headers,
params=params,
json=data
)
return res
def search_group(self, group_name):
uri = 'groups'
query_params = {
'q': encode_string_results(group_name)
}
return self.http_request(
method="GET",
url_suffix=uri,
params=query_params
)
def get_group_by_id(self, group_id):
uri = f'groups/{group_id}'
return self.http_request(
method='GET',
url_suffix=uri
)
def get_group_members(self, group_id):
uri = f'groups/{group_id}/users'
return self.get_paged_results(uri)
def get_paged_results(self, uri, query_param=None):
response = self.http_request(
method="GET",
url_suffix=uri,
params=query_param
)
paged_results = response.json()
if response.status_code != 200:
raise Exception(
f'Error occurred while calling Okta API: {response.request.url}. Response: {response.json()}')
while "next" in response.links and len(response.json()) > 0:
next_page = response.links.get("next").get("url")
response = self._http_request(
method="GET",
full_url=next_page,
url_suffix=''
)
if response.status_code != 200:
raise Exception(
f'Error occurred while calling Okta API: {response.request.url}. Response: {response.json()}')
paged_results += response.json()
return paged_results
def get_app_user_assignment(self, application_id, user_id):
uri = f'/apps/{application_id}/users/{user_id}'
res = self._http_request(
method='GET',
url_suffix=uri,
resp_type='response',
ok_codes=(200, 404)
)
return res
def list_user_apps(self, user_id):
uri = 'apps'
query_params = {
'filter': f'user.id eq "{user_id}"'
}
res = self._http_request(
method='GET',
url_suffix=uri,
params=query_params
)
return res
def list_apps(self, query, page, limit):
query_params = {
'q': query,
'limit': limit
}
curr_page = 0
apps_batch, next_page = self.list_apps_batch(url_suffix='/apps', params=query_params)
while apps_batch and curr_page != page:
curr_page += 1
apps_batch, next_page = self.list_apps_batch(full_url=next_page)
if not apps_batch:
apps_batch = []
return apps_batch
def list_apps_batch(self, url_suffix='', params=None, full_url=''):
""" Gets a batch of apps from Okta.
Args:
url_suffix (str): The apps API endpoint.
params (dict): The API query params.
full_url (str): The full url retrieved from the last API call.
Return:
apps_batch (dict): The logs batch.
next_page (str): URL for next API call (equals '' on last batch).
"""
if not url_suffix and not full_url:
return None, None
res = self._http_request(
method='GET',
url_suffix=url_suffix,
params=params,
full_url=full_url,
resp_type='response'
)
logs_batch = res.json()
next_page = res.links.get('next', {}).get('url')
return logs_batch, next_page
def get_logs(self, next_page=None, last_run_time=None, time_now=None,
query_filter=None, auto_generate_filter=False, context=None):
logs = []
uri = 'logs'
if auto_generate_filter:
query_filter = get_query_filter(context)
params = {
'filter': query_filter,
'since': last_run_time,
'until': time_now
}
logs_batch, next_page = self.get_logs_batch(url_suffix=uri, params=params, full_url=next_page)
try:
while logs_batch:
logs.extend(logs_batch)
logs_batch, next_page = self.get_logs_batch(full_url=next_page)
except DemistoException as e:
# in case of too many API calls, we return what we got and save the next_page for next fetch
if not is_rate_limit_error(e):
raise e
return logs, next_page
def get_logs_batch(self, url_suffix='', params=None, full_url=''):
""" Gets a batch of logs from Okta.
Args:
url_suffix (str): The logs API endpoint.
params (dict): The API query params.
full_url (str): The full url retrieved from the last API call. Preferred over url_suffix if not empty.
Return:
logs_batch (dict): The logs batch.
next_page (str): URL for next API call (equals '' on last batch).
"""
if not url_suffix and not full_url:
return None, None
res = self._http_request(
method='GET',
url_suffix=url_suffix,
params=params,
full_url=full_url,
resp_type='response'
)
logs_batch = res.json()
next_page = res.links.get('next', {}).get('url')
return logs_batch, next_page
'''HELPER FUNCTIONS'''
def get_all_user_profiles():
query = 'type:"User Profile"'
email_to_user_profile = {}
user_profiles: List[dict] = []
search_indicators = IndicatorsSearcher(query=query, size=BATCH_SIZE)
for user_profile_res in search_indicators:
user_profiles.extend(user_profile_res.get('iocs') or [])
for user_profile in user_profiles:
user_profile = user_profile.get('CustomFields', {})
email_to_user_profile[user_profile.get('email')] = user_profile
return email_to_user_profile
def get_event_username(log_entry):
for target in log_entry.get('target', []):
if target.get('type') == 'User':
return target.get('alternateId')
return None
def should_drop_event(log_entry, email_to_user_profile):
""" Returns a boolean value indicates whether the incident should be dropped.
Args:
log_entry (dict): The log entry.
Returns:
(bool) True iff the event should be dropped.
"""
username = get_event_username(log_entry)
if username is not None and email_to_user_profile.get(username) is None:
demisto.info(f'Dropping incident for user with username {username} - '
f'User Profile does not exist in XSOAR.')
return True
return False
def add_user_profile_data_to_entry(log_entry, email_to_user_profile):
username = get_event_username(log_entry)
user_profile = email_to_user_profile.get(username, {})
log_entry.update(user_profile)
log_entry['UserProfile'] = user_profile
def get_query_filter(context):
iam_configuration = context.get('IAMConfiguration', [])
if not iam_configuration:
raise DemistoException(FETCH_QUERY_EXCEPTION_MSG)
application_ids = [row['ApplicationID'] for row in iam_configuration]
query_filter = '(eventType eq "application.user_membership.add" ' \
                   'or eventType eq "application.user_membership.remove") and '
query_filter += '(' + ' or '.join([f'target.id co "{app_id}"' for app_id in application_ids]) + ')'
return query_filter
def is_rate_limit_error(e):
if hasattr(e, 'res') and e.res is not None:
return e.res.status_code == 429
return False
def handle_exception(user_profile, e, action, okta_user=None):
""" Handles failed responses from Okta API by setting the User Profile object with the results.
Args:
user_profile (IAMUserProfile): The User Profile object.
e (Exception): The exception error. If DemistoException, holds the response json.
        action (IAMActions): An enum representing the current action (get, update, create, etc).
        okta_user (dict): The Okta user data, if already retrieved (used when the user is already disabled).
"""
if e.__class__ is DemistoException and hasattr(e, 'res') and e.res is not None:
try:
resp = e.res.json()
error_code = resp.get('errorCode')
error_message = get_error_details(resp)
except ValueError:
error_code = e.res.status_code
error_message = str(e)
else:
error_code = ''
error_message = str(e)
if error_code == USER_IS_DISABLED_ERROR:
user_profile.set_user_is_already_disabled(okta_user)
elif error_code in ERROR_CODES_TO_SKIP:
user_profile.set_result(action=action,
skip=True,
skip_reason=error_message)
else:
should_return_error = error_code in ERROR_CODES_TO_RETURN_ERROR
user_profile.set_result(action=action,
success=False,
return_error=should_return_error,
error_code=error_code,
error_message=error_message)
demisto.error(traceback.format_exc())
def get_error_details(res):
""" Parses the error details retrieved from Okta and outputs the resulted string.
Args:
res (dict): The data retrieved from Okta.
Returns:
(str) The parsed error details.
"""
error_msg = f'{res.get("errorSummary")}. '
causes = ''
for idx, cause in enumerate(res.get('errorCauses', []), 1):
causes += f'{idx}. {cause.get("errorSummary")}\n'
if causes:
error_msg += f'Reason:\n{causes}'
return error_msg
'''COMMAND FUNCTIONS'''
def test_module(client, is_fetch, fetch_query_filter, auto_generate_query_filter, context, first_fetch_str):
if is_fetch:
if auto_generate_query_filter:
get_query_filter(context) # will raise an exception if configuration doesn't exist
elif not fetch_query_filter:
raise DemistoException(FETCH_QUERY_EXCEPTION_MSG)
try:
dateparser.parse(first_fetch_str).strftime(DATE_FORMAT) # type: ignore
except AttributeError:
raise DemistoException('First fetch timestamp parameter is not in the correct format.')
client.test_connection()
return_results('ok')
def get_mapping_fields_command(client):
okta_fields = client.get_okta_fields()
incident_type_scheme = SchemeTypeMapping(type_name=IAMUserProfile.DEFAULT_INCIDENT_TYPE)
for field, description in okta_fields.items():
incident_type_scheme.add_field(field, description)
return GetMappingFieldsResponse([incident_type_scheme])
def get_user_command(client, args, mapper_in, mapper_out):
user_profile = IAMUserProfile(user_profile=args.get('user-profile'), mapper=mapper_out,
incident_type=IAMUserProfile.UPDATE_INCIDENT_TYPE)
try:
iam_attr, iam_attr_value = user_profile.get_first_available_iam_user_attr(GET_USER_ATTRIBUTES)
okta_user = client.get_user(iam_attr, iam_attr_value)
if not okta_user:
error_code, error_message = IAMErrors.USER_DOES_NOT_EXIST
user_profile.set_result(action=IAMActions.GET_USER,
success=False,
error_code=error_code,
error_message=error_message)
else:
user_profile.update_with_app_data(okta_user, mapper_in)
user_profile.set_result(
action=IAMActions.GET_USER,
success=True,
active=False if okta_user.get('status') == DEPROVISIONED_STATUS else True,
iden=okta_user.get('id'),
email=okta_user.get('profile', {}).get('email'),
username=okta_user.get('profile', {}).get('login'),
details=okta_user
)
except Exception as e:
handle_exception(user_profile, e, IAMActions.GET_USER)
return user_profile
def disable_user_command(client, args, is_command_enabled, mapper_out):
user_profile = IAMUserProfile(user_profile=args.get('user-profile'), mapper=mapper_out,
incident_type=IAMUserProfile.UPDATE_INCIDENT_TYPE)
okta_user = None
if not is_command_enabled:
user_profile.set_result(action=IAMActions.DISABLE_USER,
skip=True,
skip_reason='Command is disabled.')
else:
try:
iam_attr, iam_attr_value = user_profile.get_first_available_iam_user_attr(GET_USER_ATTRIBUTES)
okta_user = client.get_user(iam_attr, iam_attr_value)
if not okta_user:
_, error_message = IAMErrors.USER_DOES_NOT_EXIST
user_profile.set_result(action=IAMActions.DISABLE_USER,
skip=True,
skip_reason=error_message)
else:
client.deactivate_user(okta_user.get('id'))
user_profile.set_result(
action=IAMActions.DISABLE_USER,
success=True,
active=False,
iden=okta_user.get('id'),
email=okta_user.get('profile', {}).get('email'),
username=okta_user.get('profile', {}).get('login'),
details=okta_user
)
except Exception as e:
handle_exception(user_profile, e, IAMActions.DISABLE_USER, okta_user)
return user_profile
def create_user_command(client, args, mapper_out, is_command_enabled, is_update_user_enabled, is_enable_enabled):
user_profile = IAMUserProfile(user_profile=args.get('user-profile'), mapper=mapper_out,
incident_type=IAMUserProfile.CREATE_INCIDENT_TYPE)
if not is_command_enabled:
user_profile.set_result(action=IAMActions.CREATE_USER,
skip=True,
skip_reason='Command is disabled.')
else:
try:
iam_attr, iam_attr_value = user_profile.get_first_available_iam_user_attr(GET_USER_ATTRIBUTES)
okta_user = client.get_user(iam_attr, iam_attr_value)
if okta_user:
# if user exists, update its data
return update_user_command(client, args, mapper_out, is_update_user_enabled, is_enable_enabled,
is_create_user_enabled=False, create_if_not_exists=False)
else:
okta_profile = user_profile.map_object(mapper_out, incident_type=IAMUserProfile.CREATE_INCIDENT_TYPE)
created_user = client.create_user(okta_profile)
client.activate_user(created_user.get('id'))
user_profile.set_result(
action=IAMActions.CREATE_USER,
success=True,
active=False if created_user.get('status') == DEPROVISIONED_STATUS else True,
iden=created_user.get('id'),
email=created_user.get('profile', {}).get('email'),
username=created_user.get('profile', {}).get('login'),
details=created_user
)
except Exception as e:
handle_exception(user_profile, e, IAMActions.CREATE_USER)
return user_profile
def update_user_command(client, args, mapper_out, is_command_enabled, is_enable_enabled,
is_create_user_enabled, create_if_not_exists):
user_profile = IAMUserProfile(user_profile=args.get('user-profile'), mapper=mapper_out,
incident_type=IAMUserProfile.UPDATE_INCIDENT_TYPE)
allow_enable = args.get('allow-enable') == 'true'
if not is_command_enabled:
user_profile.set_result(action=IAMActions.UPDATE_USER,
skip=True,
skip_reason='Command is disabled.')
else:
try:
iam_attr, iam_attr_value = user_profile.get_first_available_iam_user_attr(GET_USER_ATTRIBUTES,
use_old_user_data=True)
okta_user = client.get_user(iam_attr, iam_attr_value)
if okta_user:
user_id = okta_user.get('id')
if allow_enable and is_enable_enabled and okta_user.get('status') == DEPROVISIONED_STATUS:
client.activate_user(user_id)
user_profile.set_result(
action=IAMActions.ENABLE_USER,
success=True,
active=True,
iden=okta_user.get('id'),
email=okta_user.get('profile', {}).get('email'),
username=okta_user.get('profile', {}).get('login'),
details=okta_user
)
else:
okta_profile = user_profile.map_object(mapper_out,
incident_type=IAMUserProfile.UPDATE_INCIDENT_TYPE)
updated_user = client.update_user(user_id, okta_profile)
user_profile.set_result(
action=IAMActions.UPDATE_USER,
success=True,
active=False if okta_user.get('status') == DEPROVISIONED_STATUS else True,
iden=updated_user.get('id'),
email=updated_user.get('profile', {}).get('email'),
username=updated_user.get('profile', {}).get('login'),
details=updated_user
)
else:
if create_if_not_exists:
return create_user_command(client, args, mapper_out, is_create_user_enabled, False, False)
else:
_, error_message = IAMErrors.USER_DOES_NOT_EXIST
user_profile.set_result(action=IAMActions.UPDATE_USER,
skip=True,
skip_reason=error_message)
except Exception as e:
handle_exception(user_profile, e, IAMActions.UPDATE_USER)
return user_profile
def get_app_user_assignment_command(client, args):
user_id = args.get('user_id')
application_id = args.get('application_id')
res = client.get_app_user_assignment(application_id, user_id)
raw_response = res.json()
is_user_assigned_to_app = res.status_code == 200
outputs = {
'UserID': user_id,
'AppID': application_id,
'IsAssigned': is_user_assigned_to_app
}
readable_output = tableToMarkdown('App User Assignment', outputs,
headers=['UserID', 'AppID', 'IsAssigned'],
headerTransform=pascalToSpace)
if is_user_assigned_to_app:
outputs['ProfileInApp'] = raw_response.get('profile')
profile_readable = tableToMarkdown('Profile in App', raw_response.get('profile'), removeNull=True)
readable_output += f'\n{profile_readable}'
return CommandResults(
outputs=outputs,
outputs_prefix='Okta.AppUserAssignment',
outputs_key_field=['UserID', 'AppID'],
readable_output=readable_output,
raw_response=raw_response
)
def list_apps_command(client, args):
query = args.get('query')
page = int(args.get('page'))
limit = min(int(args.get('limit')), 200)
applications = client.list_apps(query, page, limit)
outputs = []
for app in applications:
outputs.append({
'ID': app.get('id'),
'Name': app.get('name'),
'Label': app.get('label'),
'Logo': f".get('logo', [{}])[0].get('href')})"
})
title = 'Okta Applications'
if applications:
from_idx = page * limit + 1
to_idx = from_idx + len(applications) - 1
title += f' ({from_idx} - {to_idx})'
return CommandResults(
outputs=outputs,
outputs_prefix='Okta.Application',
outputs_key_field='ID',
readable_output=tableToMarkdown(title, outputs, headers=['ID', 'Name', 'Label', 'Logo'])
)
def list_user_apps_command(client, args):
user_id = args.get('user_id')
applications = client.list_user_apps(user_id)
outputs = []
for app in applications:
outputs.append({
'ID': app.get('id'),
'Name': app.get('name'),
'Label': app.get('label'),
'Status': app.get('status')
})
title = 'Okta User Applications'
return CommandResults(
outputs=outputs,
outputs_prefix='Okta.Application',
outputs_key_field='ID',
readable_output=tableToMarkdown(title, outputs, headers=['ID', 'Name', 'Label', 'Status'])
)
def get_configuration(context):
iam_configuration = context.get('IAMConfiguration', [])
return CommandResults(
outputs=iam_configuration,
outputs_prefix='Okta.IAMConfiguration',
outputs_key_field='ApplicationID',
readable_output=tableToMarkdown('Okta IAM Configuration', iam_configuration)
)
def set_configuration(args):
iam_configuration = json.loads(args.get('configuration'))
context = {'IAMConfiguration': iam_configuration}
return context
def fetch_incidents(client, last_run, first_fetch_str, fetch_limit, query_filter=None,
auto_generate_filter=False, context=None):
""" If no events were saved from last run, returns new events from Okta's /log API. Otherwise,
returns the events from last run. In both cases, no more than `fetch_limit` incidents will be returned,
and the rest of them will be saved for next run.
Args:
client: (BaseClient) Okta client.
last_run: (dict) The "last run" object that was set on the previous run.
first_fetch_str: (str) First fetch time parameter (e.g. "1 day", "2 months", etc).
fetch_limit: (int) Maximum number of incidents to return.
query_filter: (str) Logs API query filter.
auto_generate_filter: (bool) Whether or not to automatically generate the query filter.
context: (dict) Integration Context object.
Returns:
incidents: (dict) Incidents/events that will be created in Cortex XSOAR
next_run: (dict) The "last run" object for the next run.
"""
incidents = last_run.get('incidents', [])
last_run_full_url = last_run.get('last_run_full_url')
first_fetch_date = dateparser.parse(first_fetch_str)
assert first_fetch_date is not None, f'could not parse {first_fetch_str}'
first_fetch = first_fetch_date.strftime(DATE_FORMAT)
last_run_time = last_run.get('last_run_time', first_fetch) # if last_run_time is undefined, use first_fetch
time_now = datetime.now().strftime(DATE_FORMAT)
demisto.debug(f'Okta: Fetching logs from {last_run_time} to {time_now}.')
if not incidents:
email_to_user_profile = get_all_user_profiles()
log_events, last_run_full_url = client.get_logs(last_run_full_url, last_run_time, time_now,
query_filter, auto_generate_filter, context)
for entry in log_events:
if not should_drop_event(entry, email_to_user_profile):
add_user_profile_data_to_entry(entry, email_to_user_profile)
incident = {'rawJSON': json.dumps(entry)}
incidents.append(incident)
next_run = {
'incidents': incidents[fetch_limit:],
'last_run_time': time_now,
'last_run_full_url': last_run_full_url
}
return incidents[:fetch_limit], next_run
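# Illustration of the carry-over behaviour above (hypothetical numbers): with
# fetch_limit=2 and five new log events e1..e5, the first call returns [e1, e2]
# and stores [e3, e4, e5] in next_run['incidents']; the following run returns
# [e3, e4] without querying the logs API again, and so on.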
class OutputContext:
"""
Class to build a generic output and context.
"""
def __init__(self, success=None, active=None, id=None, username=None, email=None, errorCode=None,
errorMessage=None, details=None, displayName=None, members=None):
self.instanceName = demisto.callingContext['context']['IntegrationInstance']
self.brand = demisto.callingContext['context']['IntegrationBrand']
self.command = demisto.command().replace('-', '_').title().replace('_', '')
self.success = success
self.active = active
self.id = id
self.username = username
self.email = email
self.errorCode = errorCode
self.errorMessage = errorMessage
self.details = details
self.displayName = displayName # Used in group
self.members = members # Used in group
self.data = {
"brand": self.brand,
"instanceName": self.instanceName,
"success": success,
"active": active,
"id": id,
"username": username,
"email": email,
"errorCode": errorCode,
"errorMessage": errorMessage,
"details": details,
"displayName": displayName,
"members": members
}
        # Remove empty values
self.data = {
k: v
for k, v in self.data.items()
if v is not None
}
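# Example of the resulting generic context (hypothetical values): inside an
# 'iam-get-group' call, OutputContext(success=True, id='00g1abcd', displayName='Admins')
# yields data == {'brand': ..., 'instanceName': ..., 'success': True,
# 'id': '00g1abcd', 'displayName': 'Admins'} -- keys with None values are
# dropped -- and self.command becomes 'IamGetGroup', which is used as the
# outputs prefix.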
def get_group_command(client, args):
scim = safe_load_json(args.get('scim'))
group_id = scim.get('id')
group_name = scim.get('displayName')
if not (group_id or group_name):
return_error("You must supply either 'id' or 'displayName' in the scim data")
group_search_result = None
if not group_id:
res = client.search_group(group_name)
res_json = res.json()
if res.status_code == 200:
if len(res_json) < 1:
generic_iam_context = OutputContext(success=False, displayName=group_name, errorCode=404,
errorMessage="Group Not Found", details=res_json)
else:
group_search_result = res_json
else:
generic_iam_context = OutputContext(success=False, displayName=group_name, id=group_id,
errorCode=res_json.get('errorCode'),
errorMessage=res_json.get('errorSummary'),
details=res_json)
if not group_search_result:
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=tableToMarkdown('Okta Get Group:', generic_iam_context.data, removeNull=True)
)
if group_search_result and len(group_search_result) > 1:
generic_iam_context_data_list = []
for group in group_search_result:
group_name = group.get('profile', {}).get('name')
generic_iam_context = OutputContext(success=True, id=group.get('id'), displayName=group_name)
generic_iam_context_data_list.append(generic_iam_context.data)
return CommandResults(
raw_response=generic_iam_context_data_list,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context_data_list,
readable_output=tableToMarkdown('Okta Get Group:', generic_iam_context_data_list, removeNull=True)
)
elif not group_id and isinstance(group_search_result, list):
group_id = group_search_result[0].get('id')
res = client.get_group_by_id(group_id)
res_json = res.json()
if res.status_code == 200:
group_member_profiles = []
include_members = args.get('includeMembers')
if include_members.lower() == 'true':
group_members = client.get_group_members(group_id)
for member in group_members:
if member.get('status') != DEPROVISIONED_STATUS:
profile = member.get('profile', {})
group_member_profile = {
"value": member.get('id'),
"display": profile.get('login')
}
group_member_profiles.append(group_member_profile)
generic_iam_context = OutputContext(success=True, id=res_json.get('id'),
displayName=res_json.get('profile', {}).get('name'),
members=group_member_profiles)
elif res.status_code == 404:
generic_iam_context = OutputContext(success=False, displayName=group_name, id=group_id, errorCode=404,
errorMessage="Group Not Found", details=res_json)
else:
generic_iam_context = OutputContext(success=False, displayName=group_name, id=group_id,
errorCode=res_json.get('errorCode'),
errorMessage=res_json.get('errorSummary'),
details=res_json)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=tableToMarkdown('Okta Get Group:', generic_iam_context.data, removeNull=True)
)
def get_logs_command(client, args):
filter = args.get('filter')
since = args.get('since')
until = args.get('until')
log_events, _ = client.get_logs(query_filter=filter, last_run_time=since, time_now=until)
return CommandResults(
raw_response=log_events,
outputs_prefix='Okta.Logs.Events',
outputs_key_field='uuid',
outputs=log_events,
readable_output=tableToMarkdown('Okta Log Events:', log_events)
)
def main():
user_profile = None
params = demisto.params()
base_url = urljoin(params['url'].strip('/'), '/api/v1/')
token = params.get('apitoken')
mapper_in = params.get('mapper-in')
mapper_out = params.get('mapper-out')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
args = demisto.args()
is_create_enabled = params.get("create-user-enabled")
is_enable_enabled = params.get("enable-user-enabled")
is_disable_enabled = params.get("disable-user-enabled")
is_update_enabled = demisto.params().get("update-user-enabled")
create_if_not_exists = demisto.params().get("create-if-not-exists")
is_fetch = params.get('isFetch')
first_fetch_str = params.get('first_fetch')
fetch_limit = int(params.get('max_fetch', 1))
auto_generate_query_filter = params.get('auto_generate_query_filter')
fetch_query_filter = params.get('fetch_query_filter')
context = demisto.getIntegrationContext()
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': f'SSWS {token}'
}
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers,
ok_codes=(200,)
)
demisto.debug(f'Command being called is {command}')
if command == 'iam-get-user':
user_profile = get_user_command(client, args, mapper_in, mapper_out)
elif command == 'iam-create-user':
user_profile = create_user_command(client, args, mapper_out, is_create_enabled,
is_update_enabled, is_enable_enabled)
elif command == 'iam-update-user':
user_profile = update_user_command(client, args, mapper_out, is_update_enabled, is_enable_enabled,
is_create_enabled, create_if_not_exists)
elif command == 'iam-disable-user':
user_profile = disable_user_command(client, args, is_disable_enabled, mapper_out)
if user_profile:
return_results(user_profile)
try:
if command == 'test-module':
test_module(client, is_fetch, fetch_query_filter, auto_generate_query_filter, context, first_fetch_str)
elif command == 'get-mapping-fields':
return_results(get_mapping_fields_command(client))
elif command == 'okta-get-app-user-assignment':
return_results(get_app_user_assignment_command(client, args))
elif command == 'okta-iam-list-applications':
return_results(list_apps_command(client, args))
elif command == 'okta-iam-list-user-applications':
return_results(list_user_apps_command(client, args))
elif command == 'okta-iam-get-configuration':
return_results(get_configuration(context))
elif command == 'okta-iam-set-configuration':
context = set_configuration(args)
demisto.setIntegrationContext(context)
elif command == 'iam-get-group':
return_results(get_group_command(client, args))
elif command == 'okta-get-logs':
return_results(get_logs_command(client, args))
elif command == 'fetch-incidents':
last_run = demisto.getLastRun()
context = demisto.getIntegrationContext()
incidents, next_run = fetch_incidents(client, last_run, first_fetch_str, fetch_limit,
fetch_query_filter, auto_generate_query_filter, context)
demisto.incidents(incidents)
demisto.setLastRun(next_run)
except Exception as e:
# For any other integration command exception, return an error
return_error(f'Failed to execute {command} command. Error: {str(e)}')
from IAMApiModule import * # noqa: E402
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| {
"content_hash": "7b1b8cfb2db5b8cdd83a510078e05b5f",
"timestamp": "",
"source": "github",
"line_count": 1020,
"max_line_length": 118,
"avg_line_length": 37.42745098039216,
"alnum_prop": 0.5713537300922046,
"repo_name": "VirusTotal/content",
"id": "d8e53a5b841031a36aa4b80e19a1b46cf2db2857",
"size": "38176",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/Okta/Integrations/Okta_IAM/Okta_IAM.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 5, transform = "None", sigma = 0.0, exog_count = 100, ar_order = 0); | {
"content_hash": "fc65a6f30c4dc2aba2affd24d3b427be",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 161,
"avg_line_length": 37.285714285714285,
"alnum_prop": 0.7011494252873564,
"repo_name": "antoinecarme/pyaf",
"id": "b5d67a905e15287872748bd380781895dd8baa56",
"size": "261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_None/trend_PolyTrend/cycle_5/ar_/test_artificial_1024_None_PolyTrend_5__100.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
"""
Package for interacting on the network at a high level.
"""
import random
import pickle
import asyncio
from logging import getLogger
from kademlia.protocol import KademliaProtocol
from kademlia.utils import digest
from kademlia.storage import ForgetfulStorage
from kademlia.node import Node
from kademlia.crawling import ValueSpiderCrawl
from kademlia.crawling import NodeSpiderCrawl
class Server(object):
"""
High level view of a node instance. This is the object that should be created
to start listening as an active node on the network.
"""
def __init__(self, ksize=20, alpha=3, id=None, storage=None):
"""
        Create a server instance. Use listen() afterwards to start listening on a port.
Args:
ksize (int): The k parameter from the paper
alpha (int): The alpha parameter from the paper
id: The id for this node on the network.
storage: An instance that implements :interface:`~kademlia.storage.IStorage`
"""
self.ksize = ksize
self.alpha = alpha
self.log = getLogger("kademlia-server")
self.storage = storage or ForgetfulStorage()
self.node = Node(id or digest(random.getrandbits(255)))
self.transport = None
self.protocol = None
self.refresh_loop = None
def stop(self):
if self.refresh_loop is not None:
self.refresh_loop.cancel()
if self.transport is not None:
self.transport.close()
def listen(self, port, interface='0.0.0.0'):
"""
Start listening on the given port.
Provide interface="::" to accept ipv6 address
"""
proto_factory = lambda: KademliaProtocol(self.node, self.storage, self.ksize)
loop = asyncio.get_event_loop()
listen = loop.create_datagram_endpoint(proto_factory, local_addr=(interface, port))
self.transport, self.protocol = loop.run_until_complete(listen)
# finally, schedule refreshing table
self.refresh_table()
def refresh_table(self):
asyncio.ensure_future(self._refresh_table())
loop = asyncio.get_event_loop()
self.refresh_loop = loop.call_later(3600, self.refresh_table)
async def _refresh_table(self):
"""
Refresh buckets that haven't had any lookups in the last hour
(per section 2.3 of the paper).
"""
ds = []
for id in self.protocol.getRefreshIDs():
node = Node(id)
nearest = self.protocol.router.findNeighbors(node, self.alpha)
spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
ds.append(spider.find())
# do our crawling
await asyncio.gather(*ds)
# now republish keys older than one hour
for key, value in self.storage.iteritemsOlderThan(3600):
await self.set(key, value)
def bootstrappableNeighbors(self):
"""
Get a :class:`list` of (ip, port) :class:`tuple` pairs suitable for use as an argument
to the bootstrap method.
The server should have been bootstrapped
already - this is just a utility for getting some neighbors and then
storing them if this server is going down for a while. When it comes
back up, the list of nodes can be used to bootstrap.
"""
neighbors = self.protocol.router.findNeighbors(self.node)
return [ tuple(n)[-2:] for n in neighbors ]
async def bootstrap(self, addrs):
"""
Bootstrap the server by connecting to other known nodes in the network.
Args:
addrs: A `list` of (ip, port) `tuple` pairs. Note that only IP addresses
are acceptable - hostnames will cause an error.
"""
cos = list(map(self.bootstrap_node, addrs))
nodes = [node for node in await asyncio.gather(*cos) if not node is None]
spider = NodeSpiderCrawl(self.protocol, self.node, nodes, self.ksize, self.alpha)
return await spider.find()
async def bootstrap_node(self, addr):
result = await self.protocol.ping(addr, self.node.id)
return Node(result[1], addr[0], addr[1]) if result[0] else None
    async def inetVisibleIP(self):
"""
Get the internet visible IP's of this node as other nodes see it.
Returns:
A `list` of IP's. If no one can be contacted, then the `list` will be empty.
"""
def handle(results):
ips = [ result[1][0] for result in results if result[0] ]
self.log.debug("other nodes think our ip is %s" % str(ips))
return ips
ds = []
for neighbor in self.bootstrappableNeighbors():
ds.append(self.protocol.stun(neighbor))
        # the original used Twisted's defer.gatherResults, which isn't imported
        # in this asyncio port; gather the STUN results directly instead
        return handle(await asyncio.gather(*ds))
async def get(self, key):
"""
Get a key if the network has it.
Returns:
:class:`None` if not found, the value otherwise.
"""
dkey = digest(key)
# if this node has it, return it
if self.storage.get(dkey) is not None:
return self.storage.get(dkey)
node = Node(dkey)
nearest = self.protocol.router.findNeighbors(node)
if len(nearest) == 0:
self.log.warning("There are no known neighbors to get key %s" % key)
return None
spider = ValueSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
return await spider.find()
async def set(self, key, value):
"""
Set the given key to the given value in the network.
"""
self.log.debug("setting '%s' = '%s' on network" % (key, value))
dkey = digest(key)
node = Node(dkey)
nearest = self.protocol.router.findNeighbors(node)
if len(nearest) == 0:
self.log.warning("There are no known neighbors to set key %s" % key)
return False
spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
nodes = await spider.find()
self.log.info("setting '%s' on %s" % (key, list(map(str, nodes))))
# if this node is close too, then store here as well
if self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]):
self.storage[dkey] = value
ds = [self.protocol.callStore(n, dkey, value) for n in nodes]
# return true only if at least one store call succeeded
return any(await asyncio.gather(*ds))
def saveState(self, fname):
"""
Save the state of this node (the alpha/ksize/id/immediate neighbors)
to a cache file with the given fname.
"""
data = { 'ksize': self.ksize,
'alpha': self.alpha,
'id': self.node.id,
'neighbors': self.bootstrappableNeighbors() }
if len(data['neighbors']) == 0:
self.log.warning("No known neighbors, so not writing to cache.")
return
        with open(fname, 'wb') as f:  # pickle requires a binary file handle
pickle.dump(data, f)
@classmethod
    def loadState(cls, fname):
"""
Load the state of this node (the alpha/ksize/id/immediate neighbors)
from a cache file with the given fname.
"""
        with open(fname, 'rb') as f:  # pickle requires a binary file handle
data = pickle.load(f)
s = Server(data['ksize'], data['alpha'], data['id'])
if len(data['neighbors']) > 0:
s.bootstrap(data['neighbors'])
return s
def saveStateRegularly(self, fname, frequency=600):
"""
Save the state of node with a given regularity to the given
filename.
Args:
            fname: File name to save regularly to
            frequency: Frequency in seconds that the state should be saved.
By default, 10 minutes.
"""
        # Twisted's LoopingCall isn't available in this asyncio port; reschedule
        # the save through the event loop instead
        self.saveState(fname)
        loop = asyncio.get_event_loop()
        return loop.call_later(frequency, self.saveStateRegularly, fname, frequency)
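# Minimal usage sketch (not part of the original module; the bootstrap address
# below is a placeholder for a reachable node on your own network):
#
#     import asyncio
#     from kademlia.network import Server
#
#     loop = asyncio.get_event_loop()
#     node = Server()
#     node.listen(8468)
#     loop.run_until_complete(node.bootstrap([('1.2.3.4', 8468)]))
#     loop.run_until_complete(node.set('my-key', 'my value'))
#     print(loop.run_until_complete(node.get('my-key')))
#     node.stop()
#     loop.close()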
| {
"content_hash": "ab0407a6a93ad00deea8a0bce1196d38",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 94,
"avg_line_length": 36.986111111111114,
"alnum_prop": 0.6035799223932907,
"repo_name": "faisalburhanudin/kademlia",
"id": "8dcaf683c4ad2633c1cabe4f50d38e32098a6566",
"size": "7989",
"binary": false,
"copies": "1",
"ref": "refs/heads/asyncio",
"path": "kademlia/network.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "203"
},
{
"name": "Python",
"bytes": "42370"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
from oslo_middleware import cors
def set_config_defaults():
"""This method updates all configuration default values."""
set_cors_middleware_defaults()
def set_cors_middleware_defaults():
"""Update default configuration options for oslo.middleware."""
# CORS Defaults
# TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/
cfg.set_defaults(cors.CORS_OPTS,
allow_headers=['X-Auth-Token',
'X-Identity-Status',
'X-Roles',
'X-Service-Catalog',
'X-User-Id',
'X-Tenant-Id',
'X-OpenStack-Request-ID'],
expose_headers=['X-Auth-Token',
'X-Subject-Token',
'X-Service-Token',
'X-OpenStack-Request-ID'],
allow_methods=['GET',
'PUT',
'POST',
'DELETE',
'PATCH']
)
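# Usage sketch (hypothetical bootstrap code; the service's real entry point may
# differ): call set_config_defaults() once, before oslo.config options are
# registered/parsed, so the CORS middleware advertises the headers and methods
# listed above:
#
#     from cloudkitty.common import defaults
#     defaults.set_config_defaults()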
| {
"content_hash": "500f9c289b1e8dac14f0aee931f6174b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 76,
"avg_line_length": 41.225806451612904,
"alnum_prop": 0.39593114241001565,
"repo_name": "muraliselva10/cloudkitty",
"id": "534eb58bcb5ba0671f14892653bdfa91fbc8df60",
"size": "1927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudkitty/common/defaults.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "2060"
},
{
"name": "Python",
"bytes": "526205"
},
{
"name": "Shell",
"bytes": "12562"
}
],
"symlink_target": ""
} |
from __future__ import division
import argparse
import resource
import gc
import av
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--count', type=int, default=5)
parser.add_argument('-f', '--frames', type=int, default=100)
parser.add_argument('--print', dest='print_', action='store_true')
parser.add_argument('--to-rgb', action='store_true')
parser.add_argument('--to-image', action='store_true')
parser.add_argument('--gc', '-g', action='store_true')
parser.add_argument('input')
args = parser.parse_args()
def format_bytes(n):
order = 0
while n > 1024:
order += 1
n //= 1024
return '%d%sB' % (n, ('', 'k', 'M', 'G', 'T', 'P')[order])
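# e.g. format_bytes(512) -> '512B', format_bytes(2048) -> '2kB',
# format_bytes(3 * 1024 * 1024) -> '3MB' (integer division, so values truncate)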
usage = []
for round_ in xrange(args.count):
print 'Round %d/%d:' % (round_ + 1, args.count)
if args.gc:
gc.collect()
usage.append(resource.getrusage(resource.RUSAGE_SELF))
fh = av.open(args.input)
vs = next(s for s in fh.streams if s.type == 'video')
fi = 0
for packet in fh.demux([vs]):
for frame in packet.decode():
if args.print_:
print frame
if args.to_rgb:
print frame.to_rgb()
if args.to_image:
print frame.to_image()
fi += 1
if fi > args.frames:
break
frame = packet = fh = vs = None
usage.append(resource.getrusage(resource.RUSAGE_SELF))
for i in xrange(len(usage) - 1):
before = usage[i]
after = usage[i + 1]
print '%s (%s)' % (format_bytes(after.ru_maxrss), format_bytes(after.ru_maxrss - before.ru_maxrss))
| {
"content_hash": "20d75b40a08d6335b9b5d0d9d118834c",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 103,
"avg_line_length": 24.953125,
"alnum_prop": 0.5817157169693175,
"repo_name": "markreidvfx/PyAV",
"id": "4f20c7275ea6722f50d2428a0d4ca0dc2f1173da",
"size": "1597",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/resource_use.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "C",
"bytes": "5907"
},
{
"name": "Makefile",
"bytes": "1681"
},
{
"name": "PowerShell",
"bytes": "5897"
},
{
"name": "Python",
"bytes": "290925"
},
{
"name": "Shell",
"bytes": "5260"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
import logging
import re
import random
from flexget import plugin
from flexget.event import event
log = logging.getLogger('torrent_cache')
MIRRORS = ['http://torrage.com/torrent/',
'https://torcache.net/torrent/',
'http://zoink.it/torrent/']
class TorrentCache(object):
"""Adds urls to torrent cache sites to the urls list."""
@plugin.priority(120)
def on_task_urlrewrite(self, task, config):
for entry in task.accepted:
info_hash = None
if entry['url'].startswith('magnet:'):
info_hash_search = re.search('btih:([0-9a-f]+)', entry['url'], re.IGNORECASE)
if info_hash_search:
info_hash = info_hash_search.group(1)
elif entry.get('torrent_info_hash'):
info_hash = entry['torrent_info_hash']
if info_hash:
entry.setdefault('urls', [entry['url']])
urls = set(host + info_hash.upper() + '.torrent' for host in MIRRORS)
# Don't add any duplicate addresses
urls = list(urls - set(entry['urls']))
# Add the cache mirrors in a random order
random.shuffle(urls)
entry['urls'].extend(urls)
@event('plugin.register')
def register_plugin():
plugin.register(TorrentCache, 'torrent_cache', api_ver=2, builtin=True)
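# Example of the rewrite above (hypothetical info hash): an accepted entry whose
# url is 'magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567' gets
# 'http://torrage.com/torrent/0123456789ABCDEF0123456789ABCDEF01234567.torrent'
# (plus the other mirrors) appended to entry['urls'] in random order.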
| {
"content_hash": "1cca4eee07aff31ee612ecdf5dba7949",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 93,
"avg_line_length": 35.36585365853659,
"alnum_prop": 0.5889655172413794,
"repo_name": "camon/Flexget",
"id": "f5074fa49d491f5fbec7069d751511ccf7777ab5",
"size": "1450",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "flexget/plugins/services/torrent_cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56725"
},
{
"name": "JavaScript",
"bytes": "455222"
},
{
"name": "Python",
"bytes": "1957167"
}
],
"symlink_target": ""
} |
import json
import athletemodel
import yate
names = athletemodel.get_names_from_store()
print(yate.start_response('application/json'))
print(json.dumps(sorted(names)))
| {
"content_hash": "ae9504bec3d76982cb07cd57dcd4bea5",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 46,
"avg_line_length": 19,
"alnum_prop": 0.783625730994152,
"repo_name": "leobarros/use_cabeca_python",
"id": "7851b618b704173fa8ce7e207cf180f4eb1406fb",
"size": "198",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "hfpy_code/chapter8/page272.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "60202"
}
],
"symlink_target": ""
} |
"""ActiveMaster definition."""
from config_bootstrap import Master
class TryserverChromiumAndroid(Master.Master4a):
project_name = 'TryserverChromiumAndroid'
master_port = 21401
slave_port = 31401
master_port_alt = 26401
buildbot_url = 'https://build.chromium.org/p/tryserver.chromium.android/'
buildbucket_bucket = 'master.tryserver.chromium.android'
service_account_file = 'service-account-chromium-tryserver.json'
| {
"content_hash": "fc30fafefd0f1fb75337366f1548efc5",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 75,
"avg_line_length": 36.083333333333336,
"alnum_prop": 0.7736720554272517,
"repo_name": "eunchong/build",
"id": "9d8df554ceda5f0844bb4f4c326363d6818edf58",
"size": "768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "masters/master.tryserver.chromium.android/master_site_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3128"
},
{
"name": "CSS",
"bytes": "211818"
},
{
"name": "HTML",
"bytes": "429981"
},
{
"name": "JavaScript",
"bytes": "75624"
},
{
"name": "Makefile",
"bytes": "21204"
},
{
"name": "Python",
"bytes": "6143109"
},
{
"name": "Shell",
"bytes": "23512"
}
],
"symlink_target": ""
} |
from Products.ZenRelations.RelSchema import ToManyCont, ToOne
from Products.ZenModel.Device import Device
class EMCIsilonDevice(Device):
_relations = Device._relations + (
('emcisilon_chassises', ToManyCont(ToOne,
'ZenPacks.community.EMCIsilon.EMCIsilonChassis',
'emcisilon_chassis'
)),
('emcisilon_disks', ToManyCont(ToOne,
'ZenPacks.community.EMCIsilon.EMCIsilonDisk',
'emcisilon_disk'
)),
('emcisilon_diskperfs', ToManyCont(ToOne,
'ZenPacks.community.EMCIsilon.EMCIsilonDiskPerf',
'emcisilon_diskperf'
)),
('emcisilon_fans', ToManyCont(ToOne,
'ZenPacks.community.EMCIsilon.EMCIsilonFan',
'emcisilon_fan'
)),
('emcisilon_nodeprotocolperfs', ToManyCont(ToOne,
'ZenPacks.community.EMCIsilon.EMCIsilonNodeProtocolPerf',
'emcisilon_nodeprotocolperf'
)),
('emcisilon_powersensors', ToManyCont(ToOne,
'ZenPacks.community.EMCIsilon.EMCIsilonPowerSensor',
'emcisilon_powersensor'
)),
('emcisilon_quotas', ToManyCont(ToOne,
'ZenPacks.community.EMCIsilon.EMCIsilonQuota',
'emcisilon_quota'
)),
('emcisilon_snapshots', ToManyCont(ToOne,
'ZenPacks.community.EMCIsilon.EMCIsilonSnapshot',
'emcisilon_snapshot'
)),
('emcisilon_snapshotschedules', ToManyCont(ToOne,
'ZenPacks.community.EMCIsilon.EMCIsilonSnapshotSchedule',
'emcisilon_snapshotschedule'
)),
('emcisilon_tempsensors', ToManyCont(ToOne,
'ZenPacks.community.EMCIsilon.EMCIsilonTempSensor',
'emcisilon_tempsensor'
)),
('emcisilon_licenses', ToManyCont(ToOne,
'ZenPacks.community.EMCIsilon.EMCIsilonLicense',
'emcisilon_license'
)),
)
| {
"content_hash": "6a594396c7cccf5155bacf4c1e4dea26",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 69,
"avg_line_length": 37.41509433962264,
"alnum_prop": 0.5985879979828542,
"repo_name": "linkslice/ZenPacks.community.EMCIsilon",
"id": "7ec6ded134fa0a253645a4ff4ed5edfbdb172b29",
"size": "1983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ZenPacks/community/EMCIsilon/EMCIsilonDevice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "29500"
},
{
"name": "Python",
"bytes": "53650"
}
],
"symlink_target": ""
} |
"""Tests for the blog admin page."""
from __future__ import annotations
import logging
from core import feconf
from core.domain import config_domain
from core.domain import config_services
from core.tests import test_utils
from typing import List
class BlogAdminPageTests(test_utils.GenericTestBase):
"""Checks the access to the blog admin page and its rendering."""
def test_blog_admin_page_access_without_logging_in(self) -> None:
"""Tests access to the Blog Admin page."""
self.get_html_response('/blog-admin', expected_status_int=302)
def test_blog_admin_page_acess_without_being_blog_admin(self) -> None:
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.login(self.VIEWER_EMAIL)
self.get_html_response('/blog-admin', expected_status_int=401)
self.logout()
def test_blog_admin_page_acess_as_blog_admin(self) -> None:
self.signup(self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME)
self.add_user_role(
self.BLOG_ADMIN_USERNAME, feconf.ROLE_ID_BLOG_ADMIN)
self.login(self.BLOG_ADMIN_EMAIL)
self.get_html_response('/blog-admin')
self.logout()
class BlogAdminRolesHandlerTest(test_utils.GenericTestBase):
"""Checks the user role handling on the blog admin page."""
def setUp(self) -> None:
"""Complete the signup process for self.ADMIN_EMAIL."""
super().setUp()
self.signup(
self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME)
self.add_user_role(
self.BLOG_ADMIN_USERNAME,
feconf.ROLE_ID_BLOG_ADMIN)
def test_updating_and_removing_blog_editor_role_successfully(
self
) -> None:
user_email = '[email protected]'
username = 'user1'
self.signup(user_email, username)
self.login(self.BLOG_ADMIN_EMAIL)
# Check role correctly gets updated.
csrf_token = self.get_new_csrf_token()
response_dict = self.post_json(
feconf.BLOG_ADMIN_ROLE_HANDLER_URL,
{
'role': feconf.ROLE_ID_BLOG_POST_EDITOR,
'username': username
},
csrf_token=csrf_token,
expected_status_int=200)
self.assertEqual(response_dict, {})
# Check removing user from blog editor role.
csrf_token = self.get_new_csrf_token()
response_dict = self.put_json(
feconf.BLOG_ADMIN_ROLE_HANDLER_URL,
{'username': username},
csrf_token=csrf_token,
expected_status_int=200)
self.assertEqual(response_dict, {})
def test_updating_blog_editor_role_for_invalid_user(self) -> None:
username = 'invaliduser'
self.login(self.BLOG_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.BLOG_ADMIN_ROLE_HANDLER_URL,
{
'role': feconf.ROLE_ID_BLOG_ADMIN,
'username': username
},
csrf_token=csrf_token,
expected_status_int=400)
def test_removing_blog_editor_role_for_invalid_user(self) -> None:
username = 'invaliduser'
self.login(self.BLOG_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
self.put_json(
feconf.BLOG_ADMIN_ROLE_HANDLER_URL,
{'username': username},
csrf_token=csrf_token,
expected_status_int=400)
csrf_token = self.get_new_csrf_token()
self.put_json(
feconf.BLOG_ADMIN_ROLE_HANDLER_URL,
{},
csrf_token=csrf_token,
expected_status_int=400)
class BlogAdminHandlerTest(test_utils.GenericTestBase):
"""Checks the user role handling on the blog admin page."""
def setUp(self) -> None:
"""Complete the signup process for self.ADMIN_EMAIL."""
super().setUp()
self.signup(
self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME)
self.add_user_role(
self.BLOG_ADMIN_USERNAME,
feconf.ROLE_ID_BLOG_ADMIN)
self.blog_admin_id = self.get_user_id_from_email(self.BLOG_ADMIN_EMAIL)
def test_update_configuration_property(self) -> None:
"""Test that configuration properties can be updated."""
self.login(self.BLOG_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
new_config_value = 20
response_dict = self.get_json('/blogadminhandler')
response_config_properties = response_dict['config_properties']
self.assertDictContainsSubset({
'value': 10,
}, response_config_properties[
config_domain.MAX_NUMBER_OF_TAGS_ASSIGNED_TO_BLOG_POST.name])
payload = {
'action': 'save_config_properties',
'new_config_property_values': {
config_domain.MAX_NUMBER_OF_TAGS_ASSIGNED_TO_BLOG_POST.name: (
new_config_value),
}
}
self.post_json('/blogadminhandler', payload, csrf_token=csrf_token)
response_dict = self.get_json('/blogadminhandler')
response_config_properties = response_dict['config_properties']
self.assertDictContainsSubset({
'value': new_config_value,
}, response_config_properties[
config_domain.MAX_NUMBER_OF_TAGS_ASSIGNED_TO_BLOG_POST.name])
self.logout()
def test_revert_config_property(self) -> None:
observed_log_messages: List[str] = []
def _mock_logging_function(msg: str, *args: str) -> None:
"""Mocks logging.info()."""
observed_log_messages.append(msg % args)
self.login(self.BLOG_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
config_services.set_property(
self.blog_admin_id,
'max_number_of_tags_assigned_to_blog_post',
20)
self.assertEqual(
config_domain.MAX_NUMBER_OF_TAGS_ASSIGNED_TO_BLOG_POST.value, 20)
with self.swap(logging, 'info', _mock_logging_function):
self.post_json(
'/blogadminhandler', {
'action': 'revert_config_property',
'config_property_id':
'max_number_of_tags_assigned_to_blog_post',
}, csrf_token=csrf_token)
self.assertFalse(config_domain.PROMO_BAR_ENABLED.value)
self.assertEqual(
observed_log_messages,
['[BLOG ADMIN] %s reverted config property:'
' max_number_of_tags_assigned_to_blog_post'
% self.blog_admin_id])
self.logout()
def test_invalid_values_for_updating_config_properties(self) -> None:
self.login(self.BLOG_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
new_config_value = [20]
response_dict = self.get_json('/blogadminhandler')
response_config_properties = response_dict['config_properties']
self.assertDictContainsSubset({
'value': 10,
}, response_config_properties[
config_domain.MAX_NUMBER_OF_TAGS_ASSIGNED_TO_BLOG_POST.name])
payload = {
'action': 'save_config_properties',
'new_config_property_values': {
config_domain.MAX_NUMBER_OF_TAGS_ASSIGNED_TO_BLOG_POST.name: (
new_config_value),
}
}
response_dict = self.post_json(
'/blogadminhandler', payload, csrf_token=csrf_token,
expected_status_int=400)
self.assertEqual(
response_dict['error'], 'Schema validation for \'new_config_'
'property_values\' failed: Could not convert list to int: [20]')
def test_config_prop_cannot_be_saved_without_new_config_property_values(
self
) -> None:
self.login(self.BLOG_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
payload = {
'action': 'save_config_properties',
'new_config_property_values': None
}
response_dict = self.post_json(
'/blogadminhandler', payload, csrf_token=csrf_token,
expected_status_int=500
)
self.assertEqual(
response_dict['error'],
'The new_config_property_values cannot be None when the '
'action is save_config_properties.'
)
def test_config_id_cannot_be_none_when_action_is_revert_config_property(
self
) -> None:
self.login(self.BLOG_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
payload = {
'action': 'revert_config_property',
'config_property_id': None
}
response_dict = self.post_json(
'/blogadminhandler', payload, csrf_token=csrf_token,
expected_status_int=500
)
self.assertEqual(
response_dict['error'],
'The config_property_id cannot be None when the action '
'is revert_config_property.'
)
def test_raise_error_for_updating_value_to_zero_for_max_tags(self) -> None:
self.login(self.BLOG_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
new_config_value = 0
response_dict = self.get_json('/blogadminhandler')
response_config_properties = response_dict['config_properties']
self.assertDictContainsSubset({
'value': 10,
}, response_config_properties[
config_domain.MAX_NUMBER_OF_TAGS_ASSIGNED_TO_BLOG_POST.name])
payload = {
'action': 'save_config_properties',
'new_config_property_values': {
config_domain.MAX_NUMBER_OF_TAGS_ASSIGNED_TO_BLOG_POST.name: (
new_config_value),
}
}
response_dict = self.post_json(
'/blogadminhandler', payload, csrf_token=csrf_token,
expected_status_int=400)
self.assertEqual(
response_dict['error'], 'Schema validation for \'new_config_'
'property_values\' failed: Validation failed: is_at_least'
' ({\'min_value\': 1}) for object 0'
)
def test_raise_error_for_updating_to_negative_value_for_max_tags(
self
) -> None:
self.login(self.BLOG_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
new_config_value = -2
response_dict = self.get_json('/blogadminhandler')
response_config_properties = response_dict['config_properties']
self.assertDictContainsSubset({
'value': 10,
}, response_config_properties[
config_domain.MAX_NUMBER_OF_TAGS_ASSIGNED_TO_BLOG_POST.name])
payload = {
'action': 'save_config_properties',
'new_config_property_values': {
config_domain.MAX_NUMBER_OF_TAGS_ASSIGNED_TO_BLOG_POST.name: (
new_config_value),
}
}
response_dict = self.post_json(
'/blogadminhandler', payload, csrf_token=csrf_token,
expected_status_int=400)
self.assertEqual(
response_dict['error'], 'Schema validation for \'new_config_'
'property_values\' failed: Validation failed: is_at_least'
' ({\'min_value\': 1}) for object -2'
)
| {
"content_hash": "b381e1f0dd0f88cdc34fb4383620e02c",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 79,
"avg_line_length": 35.569182389937104,
"alnum_prop": 0.5876580320042436,
"repo_name": "oppia/oppia",
"id": "c27dda210d2262207f78eb81b739f7fc5df7d6f0",
"size": "11916",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/controllers/blog_admin_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "476480"
},
{
"name": "HTML",
"bytes": "2092923"
},
{
"name": "JavaScript",
"bytes": "1247116"
},
{
"name": "PEG.js",
"bytes": "71377"
},
{
"name": "Python",
"bytes": "17628953"
},
{
"name": "Shell",
"bytes": "2240"
},
{
"name": "TypeScript",
"bytes": "15541372"
}
],
"symlink_target": ""
} |
"""
Support for coverage analysis.
"""
from peyotl.utility import get_logger
import unittest
import shutil
import os
_LOG = get_logger(__name__)
PEYOTL_COVERAGE_ANALYSIS_AVAILABLE = False
try:
from setuptools import Command
except ImportError:
_LOG.warn("setuptools.Command could not be imported: setuptools extensions not available")
else:
try:
import coverage
except ImportError:
_LOG.warn("coverage could not be imported: test coverage analysis not available")
else:
_LOG.info("coverage imported successfully: test coverage analysis available")
PEYOTL_COVERAGE_ANALYSIS_AVAILABLE = True
from peyotl.test import get_test_suite
from peyotl.test.support import pathmap
class CoverageAnalysis(Command):
"""
Code coverage analysis command.
"""
description = "run test coverage analysis"
user_options = [
('erase', None, "remove all existing coverage results"),
('branch', 'b', 'measure branch coverage in addition to statement coverage'),
('test-module=', 't', "explicitly specify a module to test (e.g. 'peyotl.test.test_containers')"),
('no-annotate', None, "do not create annotated source code files"),
('no-html', None, "do not create HTML report files"),
]
def initialize_options(self):
"""
Initialize options to default values.
"""
self.test_module = None
self.branch = False
self.erase = False
self.no_annotate = False
self.no_html = False
self.omit = []
p = os.path.join('peyotl', 'test')
for triple in os.walk(p):
root, files = triple[0], triple[2]
for fn in files:
if fn.endswith('.py'):
fp = os.path.join(root, fn)
self.omit.append(fp)
self.omit.append('*site-packages*')
def finalize_options(self):
pass
def run(self):
"""
Main command implementation.
"""
if self.erase:
_LOG.warn("removing coverage results directory: %s", pathmap.TESTS_COVERAGE_DIR)
try:
shutil.rmtree(pathmap.TESTS_COVERAGE_DIR)
            except Exception:
pass
else:
_LOG.info("running coverage analysis ...")
if self.test_module is None:
test_suite = get_test_suite()
else:
test_suite = get_test_suite([self.test_module])
runner = unittest.TextTestRunner()
cov = coverage.coverage(branch=self.branch)
cov.start()
runner.run(test_suite)
cov.stop()
if not self.no_annotate:
cov.annotate(omit=self.omit,
directory=pathmap.TESTS_COVERAGE_SOURCE_DIR)
if not self.no_html:
cov.html_report(omit=self.omit,
directory=pathmap.TESTS_COVERAGE_REPORT_DIR)
cov.report(omit=self.omit)
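# Registration sketch (hypothetical setup.py wiring; the project's real setup
# script may differ):
#
#     import peyotl.test.support.coverage_analysis as ca
#     extra_commands = {}
#     if ca.PEYOTL_COVERAGE_ANALYSIS_AVAILABLE:
#         extra_commands['cov'] = ca.CoverageAnalysis
#     setup(..., cmdclass=extra_commands)   # then run: python setup.py cov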
| {
"content_hash": "77934446e81b2a03131d2128409dc87a",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 114,
"avg_line_length": 37.806451612903224,
"alnum_prop": 0.4988623435722412,
"repo_name": "OpenTreeOfLife/peyotl",
"id": "fcc416b7529c9514be10530fdba46bad81af0bc6",
"size": "3974",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "peyotl/test/support/coverage_analysis.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1834266"
},
{
"name": "Python",
"bytes": "1010278"
},
{
"name": "Shell",
"bytes": "28989"
},
{
"name": "XSLT",
"bytes": "573"
}
],
"symlink_target": ""
} |
__test__ = False
if __name__ == '__main__':
import eventlet
eventlet.monkey_patch()
# Leaving unpatched select methods in the select module is a recipe
# for trouble and this test makes sure we don't do that.
#
# Issues:
# * https://bitbucket.org/eventlet/eventlet/issues/167
# * https://github.com/eventlet/eventlet/issues/169
import select
for name in ['devpoll', 'poll', 'epoll', 'kqueue', 'kevent']:
assert not hasattr(select, name), name
import sys
if sys.version_info >= (3, 4):
import selectors
for name in [
'PollSelector',
'EpollSelector',
'DevpollSelector',
'KqueueSelector',
]:
assert not hasattr(selectors, name), name
default = selectors.DefaultSelector
assert default is selectors.SelectSelector, default
print('pass')
| {
"content_hash": "d0fb721b7f083c108567681fa4f8a001",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 71,
"avg_line_length": 28.09375,
"alnum_prop": 0.5984427141268076,
"repo_name": "kawamon/hue",
"id": "a761e1e364a5344ec56eb4db2b27e60e4281df4b",
"size": "899",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/eventlet-0.24.1/tests/isolated/patcher_blocking_select_methods_are_deleted.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
} |
"""
This module provides convenient functions to transform sympy expressions to
lambda functions which can be used to calculate numerical values very fast.
"""
from __future__ import print_function, division
import inspect
import textwrap
from sympy.core.compatibility import (exec_, is_sequence, iterable,
NotIterable, string_types, range, builtins)
from sympy.utilities.decorator import doctest_depends_on
# These are the namespaces the lambda functions will use.
MATH = {}
MPMATH = {}
NUMPY = {}
TENSORFLOW = {}
SYMPY = {}
NUMEXPR = {}
# Default namespaces, letting us define translations that can't be defined
# by simple variable maps, like I => 1j
# These are separate from the names above because the above names are modified
# throughout this file, whereas these should remain unmodified.
MATH_DEFAULT = {}
MPMATH_DEFAULT = {}
NUMPY_DEFAULT = {"I": 1j}
TENSORFLOW_DEFAULT = {}
SYMPY_DEFAULT = {}
NUMEXPR_DEFAULT = {}
# Mappings between sympy and other modules function names.
MATH_TRANSLATIONS = {
"ceiling": "ceil",
"E": "e",
"ln": "log",
}
MPMATH_TRANSLATIONS = {
"Abs": "fabs",
"elliptic_k": "ellipk",
"elliptic_f": "ellipf",
"elliptic_e": "ellipe",
"elliptic_pi": "ellippi",
"ceiling": "ceil",
"chebyshevt": "chebyt",
"chebyshevu": "chebyu",
"E": "e",
"I": "j",
"ln": "log",
#"lowergamma":"lower_gamma",
"oo": "inf",
#"uppergamma":"upper_gamma",
"LambertW": "lambertw",
"MutableDenseMatrix": "matrix",
"ImmutableMatrix": "matrix",
"conjugate": "conj",
"dirichlet_eta": "altzeta",
"Ei": "ei",
"Shi": "shi",
"Chi": "chi",
"Si": "si",
"Ci": "ci"
}
NUMPY_TRANSLATIONS = {
"acos": "arccos",
"acosh": "arccosh",
"arg": "angle",
"asin": "arcsin",
"asinh": "arcsinh",
"atan": "arctan",
"atan2": "arctan2",
"atanh": "arctanh",
"ceiling": "ceil",
"E": "e",
"im": "imag",
"ln": "log",
"Mod": "mod",
"oo": "inf",
"re": "real",
"SparseMatrix": "array",
"ImmutableSparseMatrix": "array",
"Matrix": "array",
"MutableDenseMatrix": "array",
"ImmutableMatrix": "array",
"ImmutableDenseMatrix": "array",
}
TENSORFLOW_TRANSLATIONS = {
"Abs": "abs",
"ceiling": "ceil",
"im": "imag",
"ln": "log",
"Mod": "mod",
"conjugate": "conj",
"re": "real",
}
NUMEXPR_TRANSLATIONS = {}
# Available modules:
MODULES = {
"math": (MATH, MATH_DEFAULT, MATH_TRANSLATIONS, ("from math import *",)),
"mpmath": (MPMATH, MPMATH_DEFAULT, MPMATH_TRANSLATIONS, ("from mpmath import *",)),
"numpy": (NUMPY, NUMPY_DEFAULT, NUMPY_TRANSLATIONS, ("import_module('numpy')",)),
"tensorflow": (TENSORFLOW, TENSORFLOW_DEFAULT, TENSORFLOW_TRANSLATIONS, ("import_module('tensorflow')",)),
"sympy": (SYMPY, SYMPY_DEFAULT, {}, (
"from sympy.functions import *",
"from sympy.matrices import *",
"from sympy import Integral, pi, oo, nan, zoo, E, I",)),
"numexpr" : (NUMEXPR, NUMEXPR_DEFAULT, NUMEXPR_TRANSLATIONS,
("import_module('numexpr')", )),
}
def _import(module, reload="False"):
"""
Creates a global translation dictionary for module.
The argument module has to be one of the following strings: "math",
"mpmath", "numpy", "sympy", "tensorflow".
These dictionaries map names of python functions to their equivalent in
other modules.
"""
from sympy.external import import_module
try:
namespace, namespace_default, translations, import_commands = MODULES[
module]
except KeyError:
raise NameError(
"'%s' module can't be used for lambdification" % module)
# Clear namespace or exit
if namespace != namespace_default:
# The namespace was already generated, don't do it again if not forced.
if reload:
namespace.clear()
namespace.update(namespace_default)
else:
return
for import_command in import_commands:
if import_command.startswith('import_module'):
module = eval(import_command)
if module is not None:
namespace.update(module.__dict__)
continue
else:
try:
exec_(import_command, {}, namespace)
continue
except ImportError:
pass
raise ImportError(
"can't import '%s' with '%s' command" % (module, import_command))
# Add translated names to namespace
for sympyname, translation in translations.items():
namespace[sympyname] = namespace[translation]
# For computing the modulus of a sympy expression we use the builtin abs
# function, instead of the previously used fabs function for all
# translation modules. This is because the fabs function in the math
# module does not accept complex valued arguments. (see issue 9474). The
# only exception, where we don't use the builtin abs function is the
# mpmath translation module, because mpmath.fabs returns mpf objects in
# contrast to abs().
if 'Abs' not in namespace:
namespace['Abs'] = abs
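# For example (assuming the stdlib math module is importable), after
# _import('math') the MATH namespace contains everything exported by math plus
# the translated names, e.g. 'ceiling' -> math.ceil and 'E' -> math.e, and
# 'Abs' -> the builtin abs.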
@doctest_depends_on(modules=('numpy',))
def lambdify(args, expr, modules=None, printer=None, use_imps=True,
dummify=True):
"""
Returns a lambda function for fast calculation of numerical values.
If not specified differently by the user, ``modules`` defaults to
``["numpy"]`` if NumPy is installed, and ``["math", "mpmath", "sympy"]``
if it isn't, that is, SymPy functions are replaced as far as possible by
either ``numpy`` functions if available, and Python's standard library
``math``, or ``mpmath`` functions otherwise. To change this behavior, the
"modules" argument can be used. It accepts:
- the strings "math", "mpmath", "numpy", "numexpr", "sympy", "tensorflow"
- any modules (e.g. math)
- dictionaries that map names of sympy functions to arbitrary functions
- lists that contain a mix of the arguments above, with higher priority
given to entries appearing first.
The default behavior is to substitute all arguments in the provided
expression with dummy symbols. This allows for applied functions (e.g.
f(t)) to be supplied as arguments. Call the function with dummify=False if
dummy substitution is unwanted (and `args` is not a string). If you want
to view the lambdified function or provide "sympy" as the module, you
should probably set dummify=False.
For functions involving large array calculations, numexpr can provide a
significant speedup over numpy. Please note that the available functions
for numexpr are more limited than numpy but can be expanded with
implemented_function and user defined subclasses of Function. If specified,
numexpr may be the only option in modules. The official list of numexpr
functions can be found at:
https://github.com/pydata/numexpr#supported-functions
In previous releases ``lambdify`` replaced ``Matrix`` with ``numpy.matrix``
by default. As of release 1.0 ``numpy.array`` is the default.
To get the old default behavior you must pass in ``[{'ImmutableMatrix':
numpy.matrix}, 'numpy']`` to the ``modules`` kwarg.
>>> from sympy import lambdify, Matrix
>>> from sympy.abc import x, y
>>> import numpy
>>> array2mat = [{'ImmutableMatrix': numpy.matrix}, 'numpy']
>>> f = lambdify((x, y), Matrix([x, y]), modules=array2mat)
>>> f(1, 2)
matrix([[1],
[2]])
Usage
=====
(1) Use one of the provided modules:
>>> from sympy import sin, tan, gamma
>>> from sympy.utilities.lambdify import lambdastr
>>> from sympy.abc import x, y
>>> f = lambdify(x, sin(x), "math")
Attention: Functions that are not in the math module will throw a name
error when the lambda function is evaluated! So this would
be better:
>>> f = lambdify(x, sin(x)*gamma(x), ("math", "mpmath", "sympy"))
(2) Use some other module:
>>> import numpy
>>> f = lambdify((x,y), tan(x*y), numpy)
Attention: There are naming differences between numpy and sympy. So if
you simply take the numpy module, e.g. sympy.atan will not be
translated to numpy.arctan. Use the modified module instead
by passing the string "numpy":
>>> f = lambdify((x,y), tan(x*y), "numpy")
>>> f(1, 2)
-2.18503986326
>>> from numpy import array
>>> f(array([1, 2, 3]), array([2, 3, 5]))
[-2.18503986 -0.29100619 -0.8559934 ]
(3) Use a dictionary defining custom functions:
>>> def my_cool_function(x): return 'sin(%s) is cool' % x
>>> myfuncs = {"sin" : my_cool_function}
>>> f = lambdify(x, sin(x), myfuncs); f(1)
'sin(1) is cool'
Examples
========
>>> from sympy.utilities.lambdify import implemented_function
>>> from sympy import sqrt, sin, Matrix
>>> from sympy import Function
>>> from sympy.abc import w, x, y, z
>>> f = lambdify(x, x**2)
>>> f(2)
4
>>> f = lambdify((x, y, z), [z, y, x])
>>> f(1,2,3)
[3, 2, 1]
>>> f = lambdify(x, sqrt(x))
>>> f(4)
2.0
>>> f = lambdify((x, y), sin(x*y)**2)
>>> f(0, 5)
0.0
>>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy')
>>> row(1, 2)
Matrix([[1, 3]])
Tuple arguments are handled and the lambdified function should
be called with the same type of arguments as were used to create
the function.:
>>> f = lambdify((x, (y, z)), x + y)
>>> f(1, (2, 4))
3
A more robust way of handling this is to always work with flattened
arguments:
>>> from sympy.utilities.iterables import flatten
>>> args = w, (x, (y, z))
>>> vals = 1, (2, (3, 4))
>>> f = lambdify(flatten(args), w + x + y + z)
>>> f(*flatten(vals))
10
Functions present in `expr` can also carry their own numerical
implementations, in a callable attached to the ``_imp_``
attribute. Usually you attach this using the
``implemented_function`` factory:
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> func = lambdify(x, f(x))
>>> func(4)
5
``lambdify`` always prefers ``_imp_`` implementations to implementations
in other namespaces, unless the ``use_imps`` input parameter is False.
"""
from sympy.core.symbol import Symbol
from sympy.utilities.iterables import flatten
# If the user hasn't specified any modules, use what is available.
module_provided = True
if modules is None:
module_provided = False
try:
_import("numpy")
except ImportError:
# Use either numpy (if available) or python.math where possible.
# XXX: This leads to different behaviour on different systems and
# might be the reason for irreproducible errors.
modules = ["math", "mpmath", "sympy"]
else:
modules = ["numpy"]
# Get the needed namespaces.
namespaces = []
# First find any function implementations
if use_imps:
namespaces.append(_imp_namespace(expr))
# Check for dict before iterating
if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'):
namespaces.append(modules)
else:
# consistency check
if _module_present('numexpr', modules) and len(modules) > 1:
raise TypeError("numexpr must be the only item in 'modules'")
namespaces += list(modules)
# fill namespace with first having highest priority
namespace = {}
for m in namespaces[::-1]:
buf = _get_namespace(m)
namespace.update(buf)
if hasattr(expr, "atoms"):
#Try if you can extract symbols from the expression.
#Move on if expr.atoms in not implemented.
syms = expr.atoms(Symbol)
for term in syms:
namespace.update({str(term): term})
if _module_present('numpy',namespaces) and printer is None:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import NumPyPrinter as printer
if _module_present('numexpr',namespaces) and printer is None:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import NumExprPrinter as printer
if _module_present('tensorflow',namespaces) and printer is None:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import TensorflowPrinter as printer
# Get the names of the args, for creating a docstring
if not iterable(args):
args = (args,)
names = []
# Grab the callers frame, for getting the names by inspection (if needed)
callers_local_vars = inspect.currentframe().f_back.f_locals.items()
for n, var in enumerate(args):
if hasattr(var, 'name'):
names.append(var.name)
else:
# It's an iterable. Try to get name by inspection of calling frame.
name_list = [var_name for var_name, var_val in callers_local_vars
if var_val is var]
if len(name_list) == 1:
names.append(name_list[0])
else:
# Cannot infer name with certainty. arg_# will have to do.
names.append('arg_' + str(n))
# Create lambda function.
lstr = lambdastr(args, expr, printer=printer, dummify=dummify)
flat = '__flatten_args__'
if flat in lstr:
namespace.update({flat: flatten})
# Provide lambda expression with builtins, and compatible implementation of range
namespace.update({'builtins':builtins, 'range':range})
func = eval(lstr, namespace)
# For numpy lambdify, wrap all input arguments in arrays.
# This is a fix for gh-11306.
if module_provided and _module_present('numpy',namespaces):
def array_wrap(funcarg):
def wrapper(*argsx, **kwargsx):
return funcarg(*[namespace['asarray'](i) for i in argsx], **kwargsx)
return wrapper
func = array_wrap(func)
# Apply the docstring
sig = "func({0})".format(", ".join(str(i) for i in names))
sig = textwrap.fill(sig, subsequent_indent=' '*8)
expr_str = str(expr)
if len(expr_str) > 78:
expr_str = textwrap.wrap(expr_str, 75)[0] + '...'
func.__doc__ = ("Created with lambdify. Signature:\n\n{sig}\n\n"
"Expression:\n\n{expr}").format(sig=sig, expr=expr_str)
return func
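# _module_present() below reports whether a module was requested either by
# name or as an already-imported module object.  A rough illustrative sketch
# of its behaviour (comments only, not part of the public API):
#
#     import math
#     _module_present('math', ['math'])         # -> True, matched by name
#     _module_present('math', [math, 'sympy'])  # -> True, matched by module object
#     _module_present('numexpr', ['math'])      # -> False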
def _module_present(modname, modlist):
if modname in modlist:
return True
for m in modlist:
if hasattr(m, '__name__') and m.__name__ == modname:
return True
return False
def _get_namespace(m):
"""
This is used by _lambdify to parse its arguments.
"""
if isinstance(m, str):
_import(m)
return MODULES[m][0]
elif isinstance(m, dict):
return m
elif hasattr(m, "__dict__"):
return m.__dict__
else:
raise TypeError("Argument must be either a string, dict or module but it is: %s" % m)
def lambdastr(args, expr, printer=None, dummify=False):
"""
Returns a string that can be evaluated to a lambda function.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.utilities.lambdify import lambdastr
>>> lambdastr(x, x**2)
'lambda x: (x**2)'
>>> lambdastr((x,y,z), [z,y,x])
'lambda x,y,z: ([z, y, x])'
Although tuples may not appear as arguments to lambda in Python 3,
lambdastr will create a lambda function that will unpack the original
arguments so that nested arguments can be handled:
>>> lambdastr((x, (y, z)), x + y)
'lambda _0,_1: (lambda x,y,z: (x + y))(*list(__flatten_args__([_0,_1])))'
"""
# Transforming everything to strings.
from sympy.matrices import DeferredVector
from sympy import Dummy, sympify, Symbol, Function, flatten
if printer is not None:
if inspect.isfunction(printer):
lambdarepr = printer
else:
if inspect.isclass(printer):
lambdarepr = lambda expr: printer().doprint(expr)
else:
lambdarepr = lambda expr: printer.doprint(expr)
else:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import lambdarepr
def sub_args(args, dummies_dict):
if isinstance(args, str):
return args
elif isinstance(args, DeferredVector):
return str(args)
elif iterable(args):
dummies = flatten([sub_args(a, dummies_dict) for a in args])
return ",".join(str(a) for a in dummies)
else:
#Sub in dummy variables for functions or symbols
if isinstance(args, (Function, Symbol)):
dummies = Dummy()
dummies_dict.update({args : dummies})
return str(dummies)
else:
return str(args)
def sub_expr(expr, dummies_dict):
try:
expr = sympify(expr).xreplace(dummies_dict)
except Exception:
if isinstance(expr, DeferredVector):
pass
elif isinstance(expr, dict):
k = [sub_expr(sympify(a), dummies_dict) for a in expr.keys()]
v = [sub_expr(sympify(a), dummies_dict) for a in expr.values()]
expr = dict(zip(k, v))
elif isinstance(expr, tuple):
expr = tuple(sub_expr(sympify(a), dummies_dict) for a in expr)
elif isinstance(expr, list):
expr = [sub_expr(sympify(a), dummies_dict) for a in expr]
return expr
# Transform args
def isiter(l):
return iterable(l, exclude=(str, DeferredVector, NotIterable))
if isiter(args) and any(isiter(i) for i in args):
from sympy.utilities.iterables import flatten
import re
dum_args = [str(Dummy(str(i))) for i in range(len(args))]
iter_args = ','.join([i if isiter(a) else i
for i, a in zip(dum_args, args)])
lstr = lambdastr(flatten(args), expr, printer=printer, dummify=dummify)
flat = '__flatten_args__'
rv = 'lambda %s: (%s)(*list(%s([%s])))' % (
','.join(dum_args), lstr, flat, iter_args)
if len(re.findall(r'\b%s\b' % flat, rv)) > 1:
raise ValueError('the name %s is reserved by lambdastr' % flat)
return rv
dummies_dict = {}
if dummify:
args = sub_args(args, dummies_dict)
else:
if isinstance(args, str):
pass
elif iterable(args, exclude=DeferredVector):
args = ",".join(str(a) for a in args)
# Transform expr
if dummify:
if isinstance(expr, str):
pass
else:
expr = sub_expr(expr, dummies_dict)
expr = lambdarepr(expr)
return "lambda %s: (%s)" % (args, expr)
def _imp_namespace(expr, namespace=None):
""" Return namespace dict with function implementations
We need to search for functions in anything that can be thrown at
us - that is - anything that could be passed as `expr`. Examples
include sympy expressions, as well as tuples, lists and dicts that may
contain sympy expressions.
Parameters
----------
expr : object
Something passed to lambdify, that will generate valid code from
``str(expr)``.
namespace : None or mapping
Namespace to fill. None results in new empty dict
Returns
-------
namespace : dict
dict with keys of implemented function names within `expr` and
corresponding values being the numerical implementation of
function
Examples
========
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import implemented_function, _imp_namespace
>>> from sympy import Function
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> g = implemented_function(Function('g'), lambda x: x*10)
>>> namespace = _imp_namespace(f(g(x)))
>>> sorted(namespace.keys())
['f', 'g']
"""
# Delayed import to avoid circular imports
from sympy.core.function import FunctionClass
if namespace is None:
namespace = {}
# tuples, lists, dicts are valid expressions
if is_sequence(expr):
for arg in expr:
_imp_namespace(arg, namespace)
return namespace
elif isinstance(expr, dict):
for key, val in expr.items():
# functions can be in dictionary keys
_imp_namespace(key, namespace)
_imp_namespace(val, namespace)
return namespace
# sympy expressions may be Functions themselves
func = getattr(expr, 'func', None)
if isinstance(func, FunctionClass):
imp = getattr(func, '_imp_', None)
if imp is not None:
name = expr.func.__name__
if name in namespace and namespace[name] != imp:
raise ValueError('We found more than one '
'implementation with name '
'"%s"' % name)
namespace[name] = imp
# and / or they may take Functions as arguments
if hasattr(expr, 'args'):
for arg in expr.args:
_imp_namespace(arg, namespace)
return namespace
def implemented_function(symfunc, implementation):
""" Add numerical ``implementation`` to function ``symfunc``.
``symfunc`` can be an ``UndefinedFunction`` instance, or a name string.
In the latter case we create an ``UndefinedFunction`` instance with that
name.
Be aware that this is a quick workaround, not a general method to create
special symbolic functions. If you want to create a symbolic function to be
used by all the machinery of SymPy you should subclass the ``Function``
class.
Parameters
----------
symfunc : ``str`` or ``UndefinedFunction`` instance
If ``str``, then create new ``UndefinedFunction`` with this as
name. If `symfunc` is a sympy function, attach implementation to it.
implementation : callable
numerical implementation to be called by ``evalf()`` or ``lambdify``
Returns
-------
afunc : sympy.FunctionClass instance
function with attached implementation
Examples
========
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import lambdify, implemented_function
>>> from sympy import Function
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> lam_f = lambdify(x, f(x))
>>> lam_f(4)
5
"""
# Delayed import to avoid circular imports
from sympy.core.function import UndefinedFunction
# if name, create function to hold implementation
if isinstance(symfunc, string_types):
symfunc = UndefinedFunction(symfunc)
elif not isinstance(symfunc, UndefinedFunction):
raise ValueError('symfunc should be either a string or'
' an UndefinedFunction instance.')
# We need to attach as a method because symfunc will be a class
symfunc._imp_ = staticmethod(implementation)
return symfunc
| {
"content_hash": "6d705c0933bdd995c898c3fd760479b3",
"timestamp": "",
"source": "github",
"line_count": 671,
"max_line_length": 110,
"avg_line_length": 34.8822652757079,
"alnum_prop": 0.6095018371357771,
"repo_name": "souravsingh/sympy",
"id": "aab0444b3a23427989a7cb37e26206514ee03041",
"size": "23406",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sympy/utilities/lambdify.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "15014164"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "2781"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "XSLT",
"bytes": "366200"
}
],
"symlink_target": ""
} |
import sys
import os
import csv
from statsd import StatsClient
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from itertools import chain
import pycrfsuite
statsd = StatsClient()
print sys.getdefaultencoding()
reload(sys)
sys.setdefaultencoding('utf-8')
#change this if you would only like to do a certain number of files, useful for testing
maxNumFiles = 1000
#base dir for all data files
data_dir = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'data/'))
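# deleteLabel() strips the gold 'label' key out of a feature dict so that only
# the observation features are handed to the CRF.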
def deleteLabel(dictionary):
del dictionary['label']
return dictionary
#divide dataset into features and labels
@statsd.timer('formatDataSet')
def formatDataSet(features):
#Y = [[s['label']] for s in features]
#X = [[deleteLabel(s)] for s in features]
Y = [[word['label'] for word in article]for article in features]
X = [[deleteLabel(word) for word in article]for article in features]
print len(X)
return X, Y
#turn features into crfsuite readable object
def word2features(token):
features = {
'label' : token[0]
}
del token[0]
for elem in token:
        separated = elem.split('=')
        nameFeat = separated[0]
#if nameFeat == 'minDistanceVerbCue':
# continue
        answer = separated[1]
features.update( {
nameFeat : answer
})
return features
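# Each input line is tab separated: the first column is the gold label and the
# remaining columns are 'name=value' feature strings.  A purely hypothetical
# row such as
#     I   filename=wsj_0001   minDistanceVerbCue=3
# comes out of word2features() as
#     {'label': 'I', 'filename': 'wsj_0001', 'minDistanceVerbCue': '3'}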
#creates a report for BIO encoded sequences
def bio_classification_report(y_true, y_pred):
"""
Classification report for a list of BIO-encoded sequences.
It computes token-level metrics and discards "O" labels.
Note that it requires scikit-learn 0.15+ (or a version from github master)
to calculate averages properly!
"""
lb = LabelBinarizer()
y_true_combined = lb.fit_transform(list(chain.from_iterable(y_true)))
y_pred_combined = lb.transform(list(chain.from_iterable(y_pred)))
tagset = set(lb.classes_)
tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])
class_indices = {cls: idx for idx, cls in enumerate(lb.classes_)}
return classification_report(
y_true_combined,
y_pred_combined,
labels = [class_indices[cls] for cls in tagset],
target_names = tagset,
)
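# Rough usage sketch with the I/O content-span labels used in this script:
#     y_true = [['O', 'I', 'I'], ['O', 'O']]
#     y_pred = [['O', 'I', 'O'], ['O', 'I']]
#     print bio_classification_report(y_true, y_pred)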
#trains a classifier based on content token features
def trainAll(X_train, y_train):
crf = pycrfsuite.Trainer(
verbose = True,
#algorithm = 'l2sgd',
)
crf.set_params({
'max_iterations': 40, # stop earlier
'feature.minfreq': 2,
'feature.possible_transitions': False
})
for xseq, yseq in zip(X_train, y_train):
crf.append(xseq, yseq)
crf.train('ContentSpanClassifier8.crfsuite')
return crf
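# trainData() reads the tab-separated training file, groups the feature rows by
# source filename so that each article becomes one sequence, and then trains
# the content-span CRF via trainAll().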
def trainData():
lines = []
trainingFileName = os.path.join(data_dir, 'PARCTrainContentSpans2.txt')
reader_object = open(trainingFileName, 'r')
lines = reader_object.readlines()
print 'length of training set'
print len(lines)
allFeatures = []
thisFileFeatures = []
print 'extracting features'
lastfilename = None
i = 0
for line in lines:
i = i + 1
row = line.split('\t')
features = word2features(row)
filename = features['filename']
if filename == lastfilename or lastfilename == None:
thisFileFeatures.append(features)
lastfilename = filename
else:
allFeatures.append(thisFileFeatures)
thisFileFeatures = []
thisFileFeatures.append(features)
lastfilename = filename
print len(allFeatures)
print 'features extracted'
print 'formatting data set'
x_train, y_train = formatDataSet(allFeatures)
prevPred = ['O']
for pred in y_train:
if pred == ['I'] and prevPred == ['O']:
print 'foundTRAIN'
prevPred = pred
print 'trainingData'
#classifier = TRAIN(x_train, y_train, x_test, y_test)
classifier = trainAll(x_train, y_train)
#tests the results of a classifier against a labelled dataset
def test(X_test, y_test):
tagger = pycrfsuite.Tagger()
#tagger.open('ContentSpanClassifier.crfsuite')
tagger.open('ContentSpanClassifier8.crfsuite')
print 'new'
y_pred2 = [tagger.tag(xseq) for xseq in X_test]
prevPred = 'O'
for pred in y_pred2:
if pred == 'I' and prevPred == 'O':
print 'foundTEST'
prevPred = pred
print(bio_classification_report(y_test, y_pred2))
y_test2 = [item for sublist in y_test for item in sublist]
y_pred3 = [item for sublist in y_pred2 for item in sublist]
print accuracy_score(y_test2, y_pred3)
#tests the classifier that is created against some data
def testData():
testingFileName = data_dir + '/PARCTestContentSpans1.txt'
reader_object = open(testingFileName, 'r')
lines = reader_object.readlines()
print 'length of test set'
print len(lines)
allFeatures = []
thisFileFeatures = []
print 'extracting features'
lastfilename = None
i = 0
for line in lines:
i = i + 1
row = line.split('\t')
features = word2features(row)
filename = features['filename']
if filename == lastfilename or lastfilename == None:
thisFileFeatures.append(features)
lastfilename = filename
else:
allFeatures.append(thisFileFeatures)
thisFileFeatures = []
thisFileFeatures.append(features)
lastfilename = filename
print len(allFeatures)
print 'features extracted'
print 'formatting data set'
    x_test, y_test = formatDataSet(allFeatures)
test(x_test, y_test)
def main():
print sys.argv
if sys.argv[1] == '-test':
testData()
elif sys.argv[1] == '-train':
trainData()
else:
print 'Use of this command line is: python source/crfsuiteTests.py -test or -train'
#labelData()
if __name__ == '__main__':
main() | {
"content_hash": "6e334fe74c59fe83c3bc9dfe4de195fe",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 87,
"avg_line_length": 22.855967078189302,
"alnum_prop": 0.7005761613251711,
"repo_name": "networkdynamics/attribution-extraction",
"id": "c454146e4d7539724b5b1d1bbddd42d2e2422011",
"size": "5554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/crfsuite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "285685"
}
],
"symlink_target": ""
} |
import os
import copy
from itertools import izip
import pymetis
from distributions.io.stream import json_load
import distributions.lp.clustering
import loom.group
from loom.group import METIS_ARGS_TEMPFILE
from loom.group import find_consensus_grouping
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from nose.tools import assert_set_equal
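# pymetis.part_graph() takes the graph in adjacency-list form: adjacency[i]
# lists the neighbours of vertex i, and eweights (when given) lists the edge
# weights in the same flattened order, which is what the assertion below
# (len(eweights) == sum(map(len, adjacency))) checks.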
def test_metis():
if os.path.exists(METIS_ARGS_TEMPFILE):
print 'Loading metis args from %s' % METIS_ARGS_TEMPFILE
args = json_load(METIS_ARGS_TEMPFILE)
else:
print 'Using simple metis args'
args = {
'nparts': 2,
'adjacency': [[0, 2, 3], [1, 2], [0, 1, 2], [0, 3]],
'eweights': [1073741824, 429496736, 357913952, 1073741824,
536870912, 429496736, 536870912, 1073741824,
357913952, 1073741824],
}
assert len(args['eweights']) == sum(map(len, args['adjacency']))
print 'Running unweighted metis...'
unweighted = dict(args)
del unweighted['eweights']
edge_cut, partition = pymetis.part_graph(**unweighted)
print 'Finished unweighted metis'
print 'Running metis...'
edge_cut, partition = pymetis.part_graph(**args)
print 'Finished metis'
class TestTypeIsCorrect:
def __init__(self):
ROW_COUNT = 1000
SAMPLE_COUNT = 10
self.clustering = distributions.lp.clustering.PitmanYor()
self.sample_count = SAMPLE_COUNT
self.row_ids = map(str, range(ROW_COUNT))
def sample_grouping(self):
assignments = self.clustering.sample_assignments(len(self.row_ids))
return loom.group.collate(izip(assignments, self.row_ids))
def sample_groupings(self):
return [self.sample_grouping() for _ in xrange(self.sample_count)]
def test_simple(self):
groupings = self.sample_groupings()
grouping = find_consensus_grouping(groupings, debug=True)
assert isinstance(grouping, list)
for row in grouping:
assert isinstance(row, loom.group.Row), row
row_ids = set(row.row_id for row in grouping)
assert len(row_ids) == len(grouping), 'grouping had duplicate rows'
assert_set_equal(set(self.row_ids), row_ids)
group_ids = sorted(list(set(row.group_id for row in grouping)))
assert_equal(
group_ids,
range(len(group_ids)),
'group ids were not a contiguous range of integers')
def test_sorting(self):
for i in xrange(10):
groupings = self.sample_groupings()
grouping = find_consensus_grouping(groupings, debug=True)
assert_equal(
grouping,
sorted(
grouping,
key=lambda x: (x.group_id, -x.confidence, x.row_id)))
group_ids = sorted(set(row.group_id for row in grouping))
counts = [
sum(1 for row in grouping if row.group_id == gid)
for gid in group_ids
]
assert_equal(counts, sorted(counts, reverse=True))
class TestValueIsCorrect:
def __init__(self):
LEVELS = 5
# LEVELS = 6 # XXX FIXME 6 or more levels fails
self.row_ids = []
self._grouping = []
for i in range(0, LEVELS):
level = range(2 ** i - 1, 2 ** (i + 1) - 1)
# level = sorted(map(str, level))
self.row_ids += level
self._grouping.append(level)
@property
def grouping(self):
return copy.deepcopy(self._grouping)
def _assert_correct(self, grouping, confidence=None):
if confidence is not None:
for row in grouping:
assert_almost_equal(row.confidence, confidence)
grouping.sort(key=(lambda r: r.row_id))
groups = loom.group.collate(
(row.group_id, row.row_id)
for row in grouping
)
groups.sort(key=len)
for group in groups:
group.sort()
assert_equal(groups, self.grouping)
def test_correct_on_perfect_data(self):
for sample_count in range(1, 11):
groupings = [self.grouping] * sample_count
grouping = find_consensus_grouping(groupings)
self._assert_correct(grouping, confidence=1.0)
def test_correct_on_noisy_data(self):
SAMPLE_COUNT = 10
GROUP_COUNT = len(self.grouping)
object_index = {
o: g
for g, group in enumerate(self.grouping)
for o in group
}
# each object is in the wrong place in one grouping
groupings = []
for g in range(SAMPLE_COUNT):
groups = self.grouping
for o in self.row_ids[g::SAMPLE_COUNT]:
t = object_index[o]
f = (t + 1) % GROUP_COUNT
groups[t].remove(o)
groups[f].append(o)
groups = filter(len, groups)
groupings.append(groups)
grouping = find_consensus_grouping(groupings)
self._assert_correct(grouping)
def test_correct_despite_outliers(self):
SAMPLE_COUNT = 10
fine = [[o] for o in self.row_ids]
coarse = [[o for o in self.row_ids]]
groupings = [fine, coarse] + [self.grouping] * (SAMPLE_COUNT - 2)
grouping = find_consensus_grouping(groupings)
self._assert_correct(grouping)
if __name__ == '__main__':
test_metis()
| {
"content_hash": "f1ece17d428b82e62ad225e78cd395a6",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 75,
"avg_line_length": 31.428571428571427,
"alnum_prop": 0.5841818181818181,
"repo_name": "fritzo/loom",
"id": "6fad23574d2d0be7148428e5594a3e5a9276525a",
"size": "7077",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "loom/test/test_group.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "400979"
},
{
"name": "CMake",
"bytes": "3958"
},
{
"name": "Makefile",
"bytes": "1851"
},
{
"name": "Protocol Buffer",
"bytes": "11819"
},
{
"name": "Python",
"bytes": "323689"
},
{
"name": "Shell",
"bytes": "395"
}
],
"symlink_target": ""
} |
"""
WSGI config for simplerec project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "simplerec.settings")
application = get_wsgi_application()
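# A minimal sketch of serving this module in production, assuming gunicorn is
# installed and the working directory is the project root:
#
#     gunicorn simplerec.wsgi:application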
| {
"content_hash": "aff7e010b851a138a3d612ab02129482",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.6875,
"alnum_prop": 0.7721518987341772,
"repo_name": "dan-passaro/django-recommend",
"id": "5123b7c34d33fd0f8ca1a5af877347f442e8a079",
"size": "395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simplerec/simplerec/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2146"
},
{
"name": "Python",
"bytes": "85661"
}
],
"symlink_target": ""
} |
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor
import scrapy
import os
# In the special case of the WTO disputes, we need to get the list of all disputes, so we run this script first:
execfile("./fetchjson.py")
# Now we build the scraper:
# First we define a box to put information into (an object to receive scraped info)
class WTO_Dispute(scrapy.Item):
url = scrapy.Field()
# name = scrapy.Field()
# description = scrapy.Field()
# Then we define a class which will be used to direct scrapy as to what to gather from the web.
class WTO_Dispute_Link_Spider(CrawlSpider):
name = 'wtodisputes'
# allowed_domains=['wto.org']
start_urls = ['http://wto.org/english/tratop_e/dispu_e/dispu_status_e.htm']
def parse(self,response):
dispute=WTO_Dispute()
dispute['url']= response.xpath("//a/text()").extract()
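        # NOTE: //a/text() extracts the anchor text of every link on the page;
        # if the link targets themselves are wanted, //a/@href is the usual
        # xpath to use instead.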
# dispute['name']= # fill in here
# dispute['description']= # fill in here
return dispute
# based on http://doc.scrapy.org/en/0.24/intro/overview.html
# to run: $ scrapy runspider wto_scraper.py -o wto_disputes.json
| {
"content_hash": "4ff53ef146bcb46fff497f25284e0ad7",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 120,
"avg_line_length": 32.2,
"alnum_prop": 0.7249334516415262,
"repo_name": "trcook/wto_python_scrape",
"id": "3230b0c971b8dfb4e1cadf1d93bf8dbb3e44239e",
"size": "1127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wto_scraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "265"
},
{
"name": "Python",
"bytes": "102676"
},
{
"name": "Shell",
"bytes": "256"
}
],
"symlink_target": ""
} |
"""
Unit tests for alias_management.py.
"""
import boto3
from botocore.exceptions import ClientError
import pytest
import alias_management
@pytest.mark.parametrize('error_code, stop_on_action', [
(None, None),
('TestException', 'stub_create_key'),
('TestException', 'stub_create_alias'),
('TestException', 'stub_list_aliases'),
('TestException', 'stub_update_alias'),
('TestException', 'stub_delete_alias'),
('TestException', 'stub_schedule_key_deletion'),
])
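# Each parametrization below makes exactly one stubbed KMS call raise
# TestException, so the demo is exercised through every failure point as well
# as the happy path (the first case, where nothing raises).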
def test_alias_management(make_stubber, stub_runner, monkeypatch, error_code, stop_on_action):
kms_client = boto3.client('kms')
kms_stubber = make_stubber(kms_client)
key_id = 'test-key-id'
key_id_2 = 'test-key-id-2'
alias = 'test-alias'
inputs = [
'y', 'test-alias', 'y', 'y', key_id_2, 'test-alias', 'y'
]
monkeypatch.setattr('builtins.input', lambda x: inputs.pop(0))
with stub_runner(error_code, stop_on_action) as runner:
runner.add(kms_stubber.stub_create_key, 'Alias management demo key', key_id)
runner.add(kms_stubber.stub_create_alias, alias, key_id, raise_and_continue=True)
if stop_on_action == 'stub_create_alias':
inputs.insert(2, 'test-alias')
runner.add(kms_stubber.stub_create_alias, alias, key_id, keep_going=True)
runner.add(kms_stubber.stub_list_aliases, 10, [alias]*10, truncated=True, keep_going=True)
runner.add(
kms_stubber.stub_list_aliases, 10, [alias]*10, marker='test-token', raise_and_continue=True)
runner.add(kms_stubber.stub_update_alias, alias, key_id_2, raise_and_continue=True)
runner.add(kms_stubber.stub_delete_alias, alias, raise_and_continue=True)
runner.add(kms_stubber.stub_schedule_key_deletion, key_id, 7, raise_and_continue=True)
if stop_on_action != 'stub_create_key':
alias_management.alias_management(kms_client)
else:
with pytest.raises(ClientError):
alias_management.alias_management(kms_client)
| {
"content_hash": "bfba3f78c7a542800d0d6a649a7b0e5a",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 104,
"avg_line_length": 39.627450980392155,
"alnum_prop": 0.6625432953983177,
"repo_name": "awsdocs/aws-doc-sdk-examples",
"id": "9dcb331e4b1ea41709fc064dccc6b7de86007a32",
"size": "2129",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/example_code/kms/test/test_alias_management.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "476653"
},
{
"name": "Batchfile",
"bytes": "900"
},
{
"name": "C",
"bytes": "3852"
},
{
"name": "C#",
"bytes": "2051923"
},
{
"name": "C++",
"bytes": "943634"
},
{
"name": "CMake",
"bytes": "82068"
},
{
"name": "CSS",
"bytes": "33378"
},
{
"name": "Dockerfile",
"bytes": "2243"
},
{
"name": "Go",
"bytes": "1764292"
},
{
"name": "HTML",
"bytes": "319090"
},
{
"name": "Java",
"bytes": "4966853"
},
{
"name": "JavaScript",
"bytes": "1655476"
},
{
"name": "Jupyter Notebook",
"bytes": "9749"
},
{
"name": "Kotlin",
"bytes": "1099902"
},
{
"name": "Makefile",
"bytes": "4922"
},
{
"name": "PHP",
"bytes": "1220594"
},
{
"name": "Python",
"bytes": "2507509"
},
{
"name": "Ruby",
"bytes": "500331"
},
{
"name": "Rust",
"bytes": "558811"
},
{
"name": "Shell",
"bytes": "63776"
},
{
"name": "Swift",
"bytes": "267325"
},
{
"name": "TypeScript",
"bytes": "119632"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.neutron.v2_0 import network
from neutronclient.neutron.v2_0 import router
from neutronclient.openstack.common.gettextutils import _
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
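# Each command class below backs one of the agent-scheduler CLI commands; for
# example, AddNetworkToDhcpAgent is registered as something like
# `neutron dhcp-agent-network-add <dhcp-agent-id> <network>`.  The exact
# command names are wired up in the client's shell module, so the names shown
# here are only indicative.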
class AddNetworkToDhcpAgent(neutronV20.NeutronCommand):
"""Add a network to a DHCP agent."""
def get_parser(self, prog_name):
parser = super(AddNetworkToDhcpAgent, self).get_parser(prog_name)
parser.add_argument(
'dhcp_agent',
help=_('ID of the DHCP agent.'))
parser.add_argument(
'network',
help=_('Network to add.'))
return parser
def run(self, parsed_args):
self.log.debug('run(%s)' % parsed_args)
neutron_client = self.get_client()
neutron_client.format = parsed_args.request_format
_net_id = neutronV20.find_resourceid_by_name_or_id(
neutron_client, 'network', parsed_args.network)
neutron_client.add_network_to_dhcp_agent(parsed_args.dhcp_agent,
{'network_id': _net_id})
print(_('Added network %s to DHCP agent') % parsed_args.network,
file=self.app.stdout)
class RemoveNetworkFromDhcpAgent(neutronV20.NeutronCommand):
"""Remove a network from a DHCP agent."""
def get_parser(self, prog_name):
parser = super(RemoveNetworkFromDhcpAgent, self).get_parser(prog_name)
parser.add_argument(
'dhcp_agent',
help=_('ID of the DHCP agent.'))
parser.add_argument(
'network',
help=_('Network to remove.'))
return parser
def run(self, parsed_args):
self.log.debug('run(%s)' % parsed_args)
neutron_client = self.get_client()
neutron_client.format = parsed_args.request_format
_net_id = neutronV20.find_resourceid_by_name_or_id(
neutron_client, 'network', parsed_args.network)
neutron_client.remove_network_from_dhcp_agent(
parsed_args.dhcp_agent, _net_id)
print(_('Removed network %s from DHCP agent') % parsed_args.network,
file=self.app.stdout)
class ListNetworksOnDhcpAgent(network.ListNetwork):
"""List the networks on a DHCP agent."""
unknown_parts_flag = False
def get_parser(self, prog_name):
parser = super(ListNetworksOnDhcpAgent,
self).get_parser(prog_name)
parser.add_argument(
'dhcp_agent',
help=_('ID of the DHCP agent.'))
return parser
def call_server(self, neutron_client, search_opts, parsed_args):
data = neutron_client.list_networks_on_dhcp_agent(
parsed_args.dhcp_agent, **search_opts)
return data
class ListDhcpAgentsHostingNetwork(neutronV20.ListCommand):
"""List DHCP agents hosting a network."""
resource = 'agent'
_formatters = {}
list_columns = ['id', 'host', 'admin_state_up', 'alive']
unknown_parts_flag = False
def get_parser(self, prog_name):
parser = super(ListDhcpAgentsHostingNetwork,
self).get_parser(prog_name)
parser.add_argument(
'network',
help=_('Network to query.'))
return parser
def extend_list(self, data, parsed_args):
for agent in data:
agent['alive'] = ":-)" if agent['alive'] else 'xxx'
def call_server(self, neutron_client, search_opts, parsed_args):
_id = neutronV20.find_resourceid_by_name_or_id(neutron_client,
'network',
parsed_args.network)
search_opts['network'] = _id
data = neutron_client.list_dhcp_agent_hosting_networks(**search_opts)
return data
class AddRouterToL3Agent(neutronV20.NeutronCommand):
"""Add a router to a L3 agent."""
def get_parser(self, prog_name):
parser = super(AddRouterToL3Agent, self).get_parser(prog_name)
parser.add_argument(
'l3_agent',
help=_('ID of the L3 agent.'))
parser.add_argument(
'router',
help=_('Router to add.'))
return parser
def run(self, parsed_args):
self.log.debug('run(%s)' % parsed_args)
neutron_client = self.get_client()
neutron_client.format = parsed_args.request_format
_id = neutronV20.find_resourceid_by_name_or_id(
neutron_client, 'router', parsed_args.router)
neutron_client.add_router_to_l3_agent(parsed_args.l3_agent,
{'router_id': _id})
print(_('Added router %s to L3 agent') % parsed_args.router,
file=self.app.stdout)
class RemoveRouterFromL3Agent(neutronV20.NeutronCommand):
"""Remove a router from a L3 agent."""
def get_parser(self, prog_name):
parser = super(RemoveRouterFromL3Agent, self).get_parser(prog_name)
parser.add_argument(
'l3_agent',
help=_('ID of the L3 agent.'))
parser.add_argument(
'router',
help=_('Router to remove.'))
return parser
def run(self, parsed_args):
self.log.debug('run(%s)' % parsed_args)
neutron_client = self.get_client()
neutron_client.format = parsed_args.request_format
_id = neutronV20.find_resourceid_by_name_or_id(
neutron_client, 'router', parsed_args.router)
neutron_client.remove_router_from_l3_agent(
parsed_args.l3_agent, _id)
print(_('Removed router %s from L3 agent') % parsed_args.router,
file=self.app.stdout)
class ListRoutersOnL3Agent(neutronV20.ListCommand):
"""List the routers on a L3 agent."""
_formatters = {'external_gateway_info':
router._format_external_gateway_info}
list_columns = ['id', 'name', 'external_gateway_info']
resource = 'router'
unknown_parts_flag = False
def get_parser(self, prog_name):
parser = super(ListRoutersOnL3Agent,
self).get_parser(prog_name)
parser.add_argument(
'l3_agent',
help=_('ID of the L3 agent to query.'))
return parser
def call_server(self, neutron_client, search_opts, parsed_args):
data = neutron_client.list_routers_on_l3_agent(
parsed_args.l3_agent, **search_opts)
return data
class ListL3AgentsHostingRouter(neutronV20.ListCommand):
"""List L3 agents hosting a router."""
resource = 'agent'
_formatters = {}
list_columns = ['id', 'host', 'admin_state_up', 'alive']
unknown_parts_flag = False
def get_parser(self, prog_name):
parser = super(ListL3AgentsHostingRouter,
self).get_parser(prog_name)
parser.add_argument('router',
help=_('Router to query.'))
return parser
def extend_list(self, data, parsed_args):
for agent in data:
agent['alive'] = ":-)" if agent['alive'] else 'xxx'
def call_server(self, neutron_client, search_opts, parsed_args):
_id = neutronV20.find_resourceid_by_name_or_id(neutron_client,
'router',
parsed_args.router)
search_opts['router'] = _id
data = neutron_client.list_l3_agent_hosting_routers(**search_opts)
return data
class ListPoolsOnLbaasAgent(neutronV20.ListCommand):
"""List the pools on a loadbalancer agent."""
list_columns = ['id', 'name', 'lb_method', 'protocol',
'admin_state_up', 'status']
resource = 'pool'
unknown_parts_flag = False
def get_parser(self, prog_name):
parser = super(ListPoolsOnLbaasAgent, self).get_parser(prog_name)
parser.add_argument(
'lbaas_agent',
help=_('ID of the loadbalancer agent to query.'))
return parser
def call_server(self, neutron_client, search_opts, parsed_args):
data = neutron_client.list_pools_on_lbaas_agent(
parsed_args.lbaas_agent, **search_opts)
return data
class GetLbaasAgentHostingPool(neutronV20.ListCommand):
"""Get loadbalancer agent hosting a pool.
    Derived from ListCommand even though the server returns only one agent,
    so that the output format stays consistent with the other agent schedulers.
"""
resource = 'agent'
list_columns = ['id', 'host', 'admin_state_up', 'alive']
unknown_parts_flag = False
def get_parser(self, prog_name):
parser = super(GetLbaasAgentHostingPool,
self).get_parser(prog_name)
parser.add_argument('pool',
help=_('Pool to query.'))
return parser
def extend_list(self, data, parsed_args):
for agent in data:
agent['alive'] = ":-)" if agent['alive'] else 'xxx'
def call_server(self, neutron_client, search_opts, parsed_args):
_id = neutronV20.find_resourceid_by_name_or_id(neutron_client,
'pool',
parsed_args.pool)
search_opts['pool'] = _id
agent = neutron_client.get_lbaas_agent_hosting_pool(**search_opts)
data = {'agents': [agent['agent']]}
return data
| {
"content_hash": "177106074523846811d7ca99bf3faca0",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 78,
"avg_line_length": 36.29501915708812,
"alnum_prop": 0.5855589570357859,
"repo_name": "cboling/SDNdbg",
"id": "db702b5bfff26db7476658c412c07598d5c27a92",
"size": "10111",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/old-stuff/pydzcvr/doc/neutronclient/neutron/v2_0/agentscheduler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6099"
},
{
"name": "HTML",
"bytes": "567814"
},
{
"name": "JavaScript",
"bytes": "545293"
},
{
"name": "Makefile",
"bytes": "11370"
},
{
"name": "PHP",
"bytes": "328"
},
{
"name": "Python",
"bytes": "295132"
},
{
"name": "Shell",
"bytes": "10978"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import functools
import itertools
import operator
# import os
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
# compat_urllib_parse_unquote,
# compat_urllib_parse_unquote_plus,
# compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
js_to_json,
orderedSet,
# sanitized_Request,
remove_quotes,
str_to_int,
)
# from ..aes import (
# aes_decrypt_text
# )
class PornHubIE(InfoExtractor):
IE_DESC = 'PornHub and Thumbzilla'
_VALID_URL = r'''(?x)
https?://
(?:
(?:[a-z]+\.)?pornhub\.com/(?:(?:view_video\.php|video/show)\?viewkey=|embed/)|
(?:www\.)?thumbzilla\.com/video/
)
(?P<id>[\da-z]+)
'''
_TESTS = [{
'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
'md5': '1e19b41231a02eba417839222ac9d58e',
'info_dict': {
'id': '648719015',
'ext': 'mp4',
'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
'uploader': 'Babes',
'duration': 361,
'view_count': int,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'age_limit': 18,
'tags': list,
'categories': list,
},
}, {
# non-ASCII title
'url': 'http://www.pornhub.com/view_video.php?viewkey=1331683002',
'info_dict': {
'id': '1331683002',
'ext': 'mp4',
'title': '重庆婷婷女王足交',
'uploader': 'cj397186295',
'duration': 1753,
'view_count': int,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'age_limit': 18,
'tags': list,
'categories': list,
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
'only_matching': True,
}, {
# removed at the request of cam4.com
'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862',
'only_matching': True,
}, {
# removed at the request of the copyright owner
'url': 'http://www.pornhub.com/view_video.php?viewkey=788152859',
'only_matching': True,
}, {
# removed by uploader
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph572716d15a111',
'only_matching': True,
}, {
# private video
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph56fd731fce6b7',
'only_matching': True,
}, {
'url': 'https://www.thumbzilla.com/video/ph56c6114abd99a/horny-girlfriend-sex',
'only_matching': True,
}, {
'url': 'http://www.pornhub.com/video/show?viewkey=648719015',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return re.findall(
r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub\.com/embed/[\da-z]+)',
webpage)
def _extract_count(self, pattern, webpage, name):
return str_to_int(self._search_regex(
pattern, webpage, '%s count' % name, fatal=False))
def _real_extract(self, url):
video_id = self._match_id(url)
def dl_webpage(platform):
return self._download_webpage(
'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id,
video_id, headers={
'Cookie': 'age_verified=1; platform=%s' % platform,
})
webpage = dl_webpage('pc')
error_msg = self._html_search_regex(
r'(?s)<div[^>]+class=(["\'])(?:(?!\1).)*\b(?:removed|userMessageSection)\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</div>',
webpage, 'error message', default=None, group='error')
if error_msg:
error_msg = re.sub(r'\s+', ' ', error_msg)
raise ExtractorError(
'PornHub said: %s' % error_msg,
expected=True, video_id=video_id)
tv_webpage = dl_webpage('tv')
assignments = self._search_regex(
r'(var.+?mediastring.+?)</script>', tv_webpage,
'encoded url').split(';')
js_vars = {}
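        # The TV page builds the media URL out of many small string variables
        # joined with '+'; parse_js_value() below resolves those references
        # recursively.  Illustrative (hypothetical) input:
        #     var a="http://host/";var mediastring=a+"clip.mp4";
        # which leaves js_vars['mediastring'] == 'http://host/clip.mp4'.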
def parse_js_value(inp):
inp = re.sub(r'/\*(?:(?!\*/).)*?\*/', '', inp)
if '+' in inp:
inps = inp.split('+')
return functools.reduce(
operator.concat, map(parse_js_value, inps))
inp = inp.strip()
if inp in js_vars:
return js_vars[inp]
return remove_quotes(inp)
for assn in assignments:
assn = assn.strip()
if not assn:
continue
assn = re.sub(r'var\s+', '', assn)
vname, value = assn.split('=', 1)
js_vars[vname] = parse_js_value(value)
video_url = js_vars['mediastring']
title = self._search_regex(
r'<h1>([^>]+)</h1>', tv_webpage, 'title', default=None)
# video_title from flashvars contains whitespace instead of non-ASCII (see
# http://www.pornhub.com/view_video.php?viewkey=1331683002), not relying
# on that anymore.
title = title or self._html_search_meta(
'twitter:title', webpage, default=None) or self._search_regex(
(r'<h1[^>]+class=["\']title["\'][^>]*>(?P<title>[^<]+)',
r'<div[^>]+data-video-title=(["\'])(?P<title>.+?)\1',
r'shareTitle\s*=\s*(["\'])(?P<title>.+?)\1'),
webpage, 'title', group='title')
flashvars = self._parse_json(
self._search_regex(
r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
video_id)
if flashvars:
thumbnail = flashvars.get('image_url')
duration = int_or_none(flashvars.get('video_duration'))
else:
title, thumbnail, duration = [None] * 3
video_uploader = self._html_search_regex(
r'(?s)From: .+?<(?:a\b[^>]+\bhref=["\']/(?:user|channel)s/|span\b[^>]+\bclass=["\']username)[^>]+>(.+?)<',
webpage, 'uploader', fatal=False)
view_count = self._extract_count(
r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
like_count = self._extract_count(
r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
dislike_count = self._extract_count(
r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
comment_count = self._extract_count(
r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')
page_params = self._parse_json(self._search_regex(
r'page_params\.zoneDetails\[([\'"])[^\'"]+\1\]\s*=\s*(?P<data>{[^}]+})',
webpage, 'page parameters', group='data', default='{}'),
video_id, transform_source=js_to_json, fatal=False)
tags = categories = None
if page_params:
tags = page_params.get('tags', '').split(',')
categories = page_params.get('categories', '').split(',')
return {
'id': video_id,
'url': video_url,
'uploader': video_uploader,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'comment_count': comment_count,
# 'formats': formats,
'age_limit': 18,
'tags': tags,
'categories': categories,
}
class PornHubPlaylistBaseIE(InfoExtractor):
def _extract_entries(self, webpage):
# Only process container div with main playlist content skipping
# drop-down menu that uses similar pattern for videos (see
# https://github.com/rg3/youtube-dl/issues/11594).
container = self._search_regex(
r'(?s)(<div[^>]+class=["\']container.+)', webpage,
'container', default=webpage)
return [
self.url_result(
'http://www.pornhub.com/%s' % video_url,
PornHubIE.ie_key(), video_title=title)
for video_url, title in orderedSet(re.findall(
r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"',
container))
]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
entries = self._extract_entries(webpage)
playlist = self._parse_json(
self._search_regex(
r'(?:playlistObject|PLAYLIST_VIEW)\s*=\s*({.+?});', webpage,
'playlist', default='{}'),
playlist_id, fatal=False)
title = playlist.get('title') or self._search_regex(
r'>Videos\s+in\s+(.+?)\s+[Pp]laylist<', webpage, 'title', fatal=False)
return self.playlist_result(
entries, playlist_id, title, playlist.get('description'))
class PornHubPlaylistIE(PornHubPlaylistBaseIE):
_VALID_URL = r'https?://(?:www\.)?pornhub\.com/playlist/(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.pornhub.com/playlist/4667351',
'info_dict': {
'id': '4667351',
'title': 'Nataly Hot',
},
'playlist_mincount': 2,
}]
class PornHubUserVideosIE(PornHubPlaylistBaseIE):
_VALID_URL = r'https?://(?:www\.)?pornhub\.com/users/(?P<id>[^/]+)/videos'
_TESTS = [{
'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
'info_dict': {
'id': 'zoe_ph',
},
'playlist_mincount': 171,
}, {
'url': 'http://www.pornhub.com/users/rushandlia/videos',
'only_matching': True,
}]
def _real_extract(self, url):
user_id = self._match_id(url)
entries = []
for page_num in itertools.count(1):
try:
webpage = self._download_webpage(
url, user_id, 'Downloading page %d' % page_num,
query={'page': page_num})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
break
raise
page_entries = self._extract_entries(webpage)
if not page_entries:
break
entries.extend(page_entries)
return self.playlist_result(entries, user_id)
| {
"content_hash": "10bf314f178b1ad9ab3db4155f1979de",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 128,
"avg_line_length": 35.328990228013026,
"alnum_prop": 0.5069149917020099,
"repo_name": "achang97/YouTunes",
"id": "3428458afa987fb3eb8c3be061742cefce6e2734",
"size": "10878",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/youtube_dl/extractor/pornhub.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9366"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
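# To build the HTML documentation with this configuration (assuming Sphinx and
# sphinxcontrib-napoleon are installed), a typical invocation from this
# directory is `make html` or, equivalently:
#
#     sphinx-build -b html . _build/html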
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'sdep'
copyright = '2016, Matt McNaughton'
author = 'Matt McNaughton'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'sdep v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'sdepdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sdep.tex', 'sdep Documentation',
'Matt McNaughton', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sdep', 'sdep Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sdep', 'sdep Documentation',
author, 'sdep', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "db88f7b23efcfc50c6b9c4359f028427",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 80,
"avg_line_length": 32.893382352941174,
"alnum_prop": 0.7044819492567341,
"repo_name": "mattjmcnaughton/sdep",
"id": "71ed982c468255f8e7ecd33f9880de5a4445f959",
"size": "9387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1354"
},
{
"name": "Python",
"bytes": "35941"
}
],
"symlink_target": ""
} |
import eventlet
import mock
from ovs.db import idl
from ovs import poller
from neutron.agent.ovsdb.native import connection
from neutron.agent.ovsdb.native import idlutils
from neutron.tests import base
class TestOVSNativeConnection(base.BaseTestCase):
def setUp(self):
super(TestOVSNativeConnection, self).setUp()
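    # _test_start() below stubs out the OVSDB schema helper and Idl machinery
    # and checks that Connection.start() registers every table when no
    # table_name_list is given, and only the named tables when one is passed.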
@mock.patch.object(connection, 'TransactionQueue')
@mock.patch.object(idlutils, 'get_schema_helper')
@mock.patch.object(idl, 'Idl')
@mock.patch.object(idlutils, 'wait_for_change')
def _test_start(self, wfc, idl, gsh, tq, table_name_list=None):
gsh.return_value = helper = mock.Mock()
self.connection = connection.Connection(
mock.Mock(), mock.Mock(), mock.Mock())
with mock.patch.object(poller, 'Poller') as poller_mock,\
mock.patch('threading.Thread'):
poller_mock.return_value.block.side_effect = eventlet.sleep
self.connection.start(table_name_list=table_name_list)
reg_all_called = table_name_list is None
reg_table_called = table_name_list is not None
self.assertEqual(reg_all_called, helper.register_all.called)
self.assertEqual(reg_table_called, helper.register_table.called)
def test_start_without_table_name_list(self):
self._test_start()
def test_start_with_table_name_list(self):
self._test_start(table_name_list=['fake-table1', 'fake-table2'])
@mock.patch.object(connection, 'TransactionQueue')
@mock.patch.object(idl, 'Idl')
@mock.patch.object(idlutils, 'wait_for_change')
def test_start_call_graph(self, wait_for_change, idl, transaction_queue):
self.connection = connection.Connection(
mock.sentinel, mock.sentinel, mock.sentinel)
self.connection.get_schema_helper = mock.Mock()
helper = self.connection.get_schema_helper.return_value
self.connection.update_schema_helper = mock.Mock()
with mock.patch.object(poller, 'Poller') as poller_mock,\
mock.patch('threading.Thread'):
poller_mock.return_value.block.side_effect = eventlet.sleep
self.connection.start()
self.connection.get_schema_helper.assert_called_once_with()
self.connection.update_schema_helper.assert_called_once_with(helper)
def test_transaction_queue_init(self):
# a test to cover py34 failure during initialization (LP Bug #1580270)
# make sure no ValueError: can't have unbuffered text I/O is raised
connection.TransactionQueue()
@mock.patch.object(connection, 'TransactionQueue')
@mock.patch.object(idlutils, 'get_schema_helper')
@mock.patch.object(idlutils, 'wait_for_change')
def test_start_with_idl_class(self, wait_for_change, get_schema_helper,
transaction_queue):
idl_class = mock.Mock()
self.connection = connection.Connection(
mock.sentinel, mock.sentinel, mock.sentinel, idl_class=idl_class)
idl_instance = idl_class.return_value
self.connection.start()
self.assertEqual(idl_instance, self.connection.idl)
@mock.patch.object(connection, 'threading')
@mock.patch.object(connection.idlutils, 'wait_for_change')
@mock.patch.object(connection, 'idl')
@mock.patch.object(connection.helpers, 'enable_connection_uri')
@mock.patch.object(connection.idlutils, 'get_schema_helper')
def test_do_get_schema_helper_retry(self, mock_get_schema_helper,
mock_enable_conn,
mock_idl,
mock_wait_for_change,
mock_threading):
mock_helper = mock.Mock()
# raise until 3rd retry attempt
mock_get_schema_helper.side_effect = [Exception(), Exception(),
mock_helper]
conn = connection.Connection(
mock.Mock(), mock.Mock(), mock.Mock())
conn.start()
self.assertEqual(3, len(mock_get_schema_helper.mock_calls))
mock_helper.register_all.assert_called_once_with()
| {
"content_hash": "085cbae9f813aec9c163fe9c0d7b0823",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 78,
"avg_line_length": 46.3,
"alnum_prop": 0.6431485481161507,
"repo_name": "cloudbase/neutron",
"id": "0b0ea091a29053834a228c5063822ef00bfc209b",
"size": "4774",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/agent/ovsdb/native/test_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "9942988"
},
{
"name": "Shell",
"bytes": "14325"
}
],
"symlink_target": ""
} |
'''
$Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $
'''
from cStringIO import StringIO
from datetime import datetime, timedelta
from struct import unpack, calcsize
from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo
from pytz.tzinfo import memorized_datetime, memorized_timedelta
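# build_tzinfo() parses a binary tzfile(5)/TZif zoneinfo file: the 'TZif'
# magic, six 32-bit counts (ttisgmtcnt, ttisstdcnt, leapcnt, timecnt, typecnt,
# charcnt), then the transition times, the local-time type indices, the ttinfo
# records and finally the NUL-separated timezone abbreviations, matching the
# head_fmt and data_fmt struct strings used below.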
def build_tzinfo(zone, fp):
head_fmt = '>4s 16x 6l'
head_size = calcsize(head_fmt)
(magic,ttisgmtcnt,ttisstdcnt,leapcnt,
timecnt,typecnt,charcnt) = unpack(head_fmt, fp.read(head_size))
# Make sure it is a tzinfo(5) file
assert magic == 'TZif'
# Read out the transition times, localtime indices and ttinfo structures.
data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict(
timecnt=timecnt, ttinfo='lBB'*typecnt, charcnt=charcnt)
data_size = calcsize(data_fmt)
data = unpack(data_fmt, fp.read(data_size))
# make sure we unpacked the right number of values
assert len(data) == 2 * timecnt + 3 * typecnt + 1
transitions = [memorized_datetime(trans)
for trans in data[:timecnt]]
lindexes = list(data[timecnt:2 * timecnt])
ttinfo_raw = data[2 * timecnt:-1]
tznames_raw = data[-1]
del data
# Process ttinfo into separate structs
ttinfo = []
tznames = {}
i = 0
while i < len(ttinfo_raw):
# have we looked up this timezone name yet?
tzname_offset = ttinfo_raw[i+2]
if tzname_offset not in tznames:
nul = tznames_raw.find('\0', tzname_offset)
if nul < 0:
nul = len(tznames_raw)
tznames[tzname_offset] = tznames_raw[tzname_offset:nul]
ttinfo.append((ttinfo_raw[i],
bool(ttinfo_raw[i+1]),
tznames[tzname_offset]))
i += 3
# Now build the timezone object
if len(transitions) == 0:
ttinfo[0][0], ttinfo[0][2]
cls = type(zone, (StaticTzInfo,), dict(
zone=zone,
_utcoffset=memorized_timedelta(ttinfo[0][0]),
_tzname=ttinfo[0][2]))
else:
# Early dates use the first standard time ttinfo
i = 0
while ttinfo[i][1]:
i += 1
if ttinfo[i] == ttinfo[lindexes[0]]:
transitions[0] = datetime.min
else:
transitions.insert(0, datetime.min)
lindexes.insert(0, i)
# calculate transition info
transition_info = []
for i in range(len(transitions)):
inf = ttinfo[lindexes[i]]
utcoffset = inf[0]
if not inf[1]:
dst = 0
else:
for j in range(i-1, -1, -1):
prev_inf = ttinfo[lindexes[j]]
if not prev_inf[1]:
break
dst = inf[0] - prev_inf[0] # dst offset
tzname = inf[2]
# Round utcoffset and dst to the nearest minute or the
# datetime library will complain. Conversions to these timezones
# might be up to plus or minus 30 seconds out, but it is
# the best we can do.
utcoffset = int((utcoffset + 30) / 60) * 60
dst = int((dst + 30) / 60) * 60
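            # Worked example of the rounding above (value chosen for illustration):
            # a raw offset of 36292 s becomes int((36292 + 30) / 60) * 60 = 36300 s,
            # i.e. exactly 10:05:00.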
transition_info.append(memorized_ttinfo(utcoffset, dst, tzname))
cls = type(zone, (DstTzInfo,), dict(
zone=zone,
_utc_transition_times=transitions,
_transition_info=transition_info))
return cls()
if __name__ == '__main__':
import os.path
from pprint import pprint
base = os.path.join(os.path.dirname(__file__), 'zoneinfo')
tz = build_tzinfo('Australia/Melbourne',
open(os.path.join(base,'Australia','Melbourne'), 'rb'))
tz = build_tzinfo('US/Eastern',
open(os.path.join(base,'US','Eastern'), 'rb'))
pprint(tz._utc_transition_times)
#print tz.asPython(4)
#print tz.transitions_mapping
| {
"content_hash": "135605adc44e983e6974d92b11398d5f",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 77,
"avg_line_length": 35.151785714285715,
"alnum_prop": 0.5664211328422657,
"repo_name": "pombreda/django-hotclub",
"id": "56b8397da5185000d935d5f2c2125e088e0531c4",
"size": "3959",
"binary": false,
"copies": "65",
"ref": "refs/heads/master",
"path": "libs/external_libs/pytz-2008b/pytz/tzfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "4084"
},
{
"name": "ApacheConf",
"bytes": "20791"
},
{
"name": "Assembly",
"bytes": "3294"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "C",
"bytes": "146718"
},
{
"name": "C#",
"bytes": "16949"
},
{
"name": "C++",
"bytes": "79372"
},
{
"name": "CSS",
"bytes": "147815"
},
{
"name": "Clean",
"bytes": "2878"
},
{
"name": "Clojure",
"bytes": "21964"
},
{
"name": "Common Lisp",
"bytes": "48874"
},
{
"name": "D",
"bytes": "5475"
},
{
"name": "Diff",
"bytes": "10634"
},
{
"name": "Dylan",
"bytes": "683"
},
{
"name": "Emacs Lisp",
"bytes": "29569"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "FORTRAN",
"bytes": "27700"
},
{
"name": "Genshi",
"bytes": "2298"
},
{
"name": "Gettext Catalog",
"bytes": "764716"
},
{
"name": "Gnuplot",
"bytes": "10376"
},
{
"name": "Groff",
"bytes": "47103"
},
{
"name": "HTML",
"bytes": "8286203"
},
{
"name": "Haskell",
"bytes": "40419"
},
{
"name": "Java",
"bytes": "81989"
},
{
"name": "JavaScript",
"bytes": "74222"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "60193"
},
{
"name": "Matlab",
"bytes": "469"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "Myghty",
"bytes": "4713"
},
{
"name": "Objective-C",
"bytes": "778"
},
{
"name": "PHP",
"bytes": "17078"
},
{
"name": "Pascal",
"bytes": "84519"
},
{
"name": "Perl",
"bytes": "32503"
},
{
"name": "Python",
"bytes": "7043260"
},
{
"name": "R",
"bytes": "3468"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Ruby",
"bytes": "91160"
},
{
"name": "Scala",
"bytes": "138"
},
{
"name": "Scheme",
"bytes": "45856"
},
{
"name": "Shell",
"bytes": "119136"
},
{
"name": "Smalltalk",
"bytes": "16163"
},
{
"name": "Standard ML",
"bytes": "42416"
},
{
"name": "TeX",
"bytes": "77612"
},
{
"name": "VimL",
"bytes": "16660"
},
{
"name": "Visual Basic",
"bytes": "846"
},
{
"name": "XSLT",
"bytes": "755"
},
{
"name": "mupad",
"bytes": "2434"
}
],
"symlink_target": ""
} |
from __future__ import division
from __future__ import unicode_literals
import codecs
import os
import re
from django.contrib import messages
from django.contrib.auth import logout as logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.signals import user_logged_in
from django.http import Http404
from django.http import HttpResponse
from django.shortcuts import redirect
from django.template import Context
from django.template import RequestContext
from django.template import loader
from django.template.defaultfilters import slugify
from templatetags.docutils_extensions.utils import make_pdf
from models import *
def render_to_response(request, template, context):
c = RequestContext(request, context)
t = loader.get_template(template)
return HttpResponse(t.render(c))
def get_bg_color(request):
if request.user.is_staff:
return '#f1e8e8'
elif request.user.is_active:
return '#ffffe0'
else:
return '#ffffff'
def get_restriction_level(request):
if request.user.is_staff:
restriction_level = 2
elif request.user.is_authenticated():
restriction_level = 1
else:
restriction_level = 0
return restriction_level
def get_page(url, request):
try:
page = Page.objects.get(url=url)
page.update()
access_level = get_restriction_level(request)
page.down_list = []
for child in page.children:
if access_level >= child.restriction_level:
page.down_list.append(child)
child.down_list = []
for grandchild in child.children:
if access_level >= grandchild.restriction_level:
child.down_list.append(grandchild)
if page.parent:
page.side_list = []
for sibling in page.parent.children:
if access_level >= sibling.restriction_level:
# if page is a classroom, only show related classrooms
# if page.classroom is None or sibling.classroom is not None:
if 1==1: # no, show all ...
page.side_list.append(sibling)
if page.series_member:
i = page.side_list.index(page)
if i < len(page.side_list) - 1:
page.next = page.side_list[i + 1]
if i > 0:
page.prev = page.side_list[i - 1]
else:
page.side_list.remove(page)
except:
page = None
return page
def logged_in_message(sender, user, request, **kwargs):
    messages.info(request, "Hi {}, you are now logged in.".format(request.user.first_name))
user_logged_in.connect(logged_in_message)
def core_logout(request):
logout(request)
if 'next' in request.GET:
return redirect(request.GET['next'])
else:
return redirect('page_root')
def core_index(request):
try:
classroom = Classroom.objects.filter(is_active=True).order_by('-first_date')[0]
page = classroom.home_page
return redirect('show_page', page.url)
except:
if request.user.is_staff:
return redirect('show_page', '/')
else:
context = { 'page': '/' }
template = 'core/page_404.html'
return render_to_response(request, template, context)
def list_classrooms(request):
classrooms = Classroom.objects.all()
active_classrooms = []
for classroom in Classroom.objects.all():
if classroom.is_active:
active_classrooms.append(classroom)
context = {
'classrooms': classrooms,
'active_classrooms': active_classrooms,
'bg_color': get_bg_color(request),
}
template = 'core/list_classrooms.html'
c = RequestContext(request, context)
t = loader.get_template(template)
return HttpResponse(t.render(c))
def show_page(request, url='/'):
page = get_page(url, request)
access_level = get_restriction_level(request)
redirect_page = False
if page is None:
redirect_page = True
elif access_level < page.restriction_level:
redirect_page = True
if redirect_page:
if request.user.is_staff:
return redirect('edit_page', url)
else:
context = { 'page': url }
template = 'core/page_404.html'
return render_to_response(request, template, context)
context = {
'page' : page,
'restriction_level': get_restriction_level(request),
'bg_color': get_bg_color(request),
}
if request.session.get('show_full', False):
template = 'core/show_full.html'
else:
template = 'core/show_page.html'
return render_to_response(request, template, context)
# @login_required(login_url=reverse('page_login')) # not sure why this doesn't work....
@login_required(login_url='/core/login/')
def edit_page(request, url='/'):
try:
page = Page.objects.get(url=url)
except: # we still have to pass 'url' to the template...
page = { 'url': url }
template = 'core/edit_page.html'
context = {
'page' : page,
}
return render_to_response(request, template, context)
def post_page(request):
if request.user.is_staff and request.method == 'POST':
url = request.POST['url'] + '/'
url = url.replace('//', '/')
if 'cancel' in request.POST:
return redirect('show_page', url)
try:
page = Page.objects.get(url=url)
except:
page = Page(url=url)
page.save()
if 'delete' in request.POST:
parent = page.parent
page.delete()
if parent:
return redirect('show_page', parent.url)
else:
return redirect('root_page')
new_url = request.POST['new_url'] + '/'
new_url = new_url.replace('//', '/')
# new_url = re.sub('[^\w^\/]+', '', new_url) # poor man's validation attempt
content = request.POST['content']
content = content.replace('\r\n','\n')
if 'update' in request.POST or 'submit' in request.POST:
page.url = new_url
page.raw_content = content
page.save()
if 'update' in request.POST:
return redirect('edit_page', page.url)
else:
return redirect('show_page', page.url)
# nothing should ever get here...
return redirect('root_page')
def print_page(request, url=''):
page = get_page(url, request)
context = {
'page' : page,
}
template = 'core/{}'.format(page.print_template)
c = Context(context, autoescape=False)
t = loader.get_template(template)
latex = t.render(c)
pdfname = make_pdf(latex, repeat=2)
pdffile = open(pdfname, 'rb')
outfile = '%s.pdf' % slugify(page.title)
response = HttpResponse(pdffile.read(), content_type='application/pdf')
# response['Content-disposition'] = 'attachment; filename=%s' % outfile
return response
def show_full(request, url):
request.session['show_full'] = not(request.session.get('show_full', False))
return redirect('show_page', url)
def list_students(request, classroom_slug):
if not request.user.is_staff:
return redirect('show_page', classroom_slug)
try:
classroom = Classroom.objects.get(slug=classroom_slug)
except:
return redirect('core_index')
context = {
'classroom': classroom,
'bg_color': get_bg_color(request),
}
template = 'core/list_students.html'
c = RequestContext(request, context)
t = loader.get_template(template)
return HttpResponse(t.render(c))
def edit_student_list(request, classroom_slug):
if not request.user.is_staff:
return redirect('show_page', classroom_slug)
try:
classroom = Classroom.objects.get(slug=classroom_slug)
except:
return redirect('core_index')
students = Student.objects.filter(classroom=classroom)
student_list_csv = ''
for student in students:
student_csv = ','.join([student.last_name,student.first_name,''])
student_list_csv += student_csv + '\n'
context = {
'student_list_csv': student_list_csv,
'classroom': classroom,
'bg_color': get_bg_color(request),
}
template = 'core/edit_student_list.html'
c = RequestContext(request, context)
t = loader.get_template(template)
return HttpResponse(t.render(c))
def post_student_list(request, classroom_slug):
if not request.user.is_staff:
return redirect('show_page', classroom_slug)
try:
classroom = Classroom.objects.get(slug=classroom_slug)
except:
return redirect('core_index')
students = Student.objects.filter(classroom=classroom)
if 'submit' in request.POST:
for student in students: # really should only delete those not in POST...
student.delete()
student_list = request.POST['student_list_csv'].splitlines()
for line in student_list:
[last_name, first_name, password] = [x.strip() for x in line.split(',')]
username = first_name[0].lower()
username += re.sub(r'[^a-z]', '', last_name.lower())[:7]
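            # e.g. a roster line "Doe,Jane,secret" (hypothetical) yields username "jdoe"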
try:
student_user = User.objects.get(username=username)
except:
student_user = User()
student_user.username = username
student_user.last_name = last_name
student_user.first_name = first_name
student_user.set_password(password)
student_user.save()
student = Student()
student.classroom = classroom
student.user = student_user
student.save()
student_user.first_name = first_name
student_user.last_name = last_name
student_user.save()
return redirect('list_students', classroom_slug)
| {
"content_hash": "5d3470a272e17ba78afcf19bec388726",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 114,
"avg_line_length": 29.41907514450867,
"alnum_prop": 0.5964240102171137,
"repo_name": "dulrich15/spot",
"id": "0a7ff2339148d85bb923f8e18a9a0d6cc7f750e4",
"size": "10179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/core/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33796"
},
{
"name": "HTML",
"bytes": "29800"
},
{
"name": "JavaScript",
"bytes": "8151"
},
{
"name": "Python",
"bytes": "135279"
},
{
"name": "TeX",
"bytes": "219243"
}
],
"symlink_target": ""
} |
from six import iteritems, string_types
from toolz import valmap, complement, compose
import toolz.curried.operator as op
from zipline.utils.preprocess import preprocess
def ensure_upper_case(func, argname, arg):
if isinstance(arg, string_types):
return arg.upper()
else:
raise TypeError(
"{0}() expected argument '{1}' to"
" be a string, but got {2} instead.".format(
func.__name__, argname, arg,)
)
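# Sketch of how ensure_upper_case is meant to be wired up through the preprocess
# decorator imported above (the function name and argument are hypothetical):
#
#     @preprocess(symbol=ensure_upper_case)
#     def lookup(symbol):
#         return symbol          # lookup('spy') -> 'SPY'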
def expect_types(*_pos, **named):
"""
Preprocessing decorator that verifies inputs have expected types.
Usage
-----
>>> @expect_types(x=int, y=str)
... def foo(x, y):
... return x, y
...
>>> foo(2, '3')
(2, '3')
>>> foo(2.0, '3')
Traceback (most recent call last):
...
TypeError: foo() expected an argument of type 'int' for argument 'x', but got float instead. # noqa
"""
if _pos:
raise TypeError("expect_types() only takes keyword arguments.")
for name, type_ in iteritems(named):
if not isinstance(type_, (type, tuple)):
raise TypeError(
"expect_types() expected a type or tuple of types for "
"argument '{name}', but got {type_} instead.".format(
name=name, type_=type_,
)
)
return preprocess(**valmap(_expect_type, named))
def _qualified_name(obj):
"""
Return the fully-qualified name (ignoring inner classes) of a type.
"""
module = obj.__module__
if module in ('__builtin__', '__main__', 'builtins'):
return obj.__name__
return '.'.join([module, obj.__name__])
def _mk_check(exc, template, pred, actual):
def _check(func, argname, argvalue):
if pred(argvalue):
raise exc(
template % {
'funcname': _qualified_name(func),
'argname': argname,
'actual': actual(argvalue),
},
)
return argvalue
return _check
def _expect_type(type_):
"""
    Factory for type-checking functions that work with the @preprocess decorator.
"""
# Slightly different messages for type and tuple of types.
_template = (
"%(funcname)s() expected a value of type {type_or_types} "
"for argument '%(argname)s', but got %(actual)s instead."
)
if isinstance(type_, tuple):
template = _template.format(
type_or_types=' or '.join(map(_qualified_name, type_))
)
else:
template = _template.format(type_or_types=_qualified_name(type_))
return _mk_check(
TypeError,
template,
lambda v: not isinstance(v, type_),
compose(_qualified_name, type),
)
def optional(type_):
"""
Helper for use with `expect_types` when an input can be `type_` or `None`.
Returns an object such that both `None` and instances of `type_` pass
checks of the form `isinstance(obj, optional(type_))`.
Parameters
----------
type_ : type
Type for which to produce an option.
Examples
--------
>>> isinstance({}, optional(dict))
True
>>> isinstance(None, optional(dict))
True
>>> isinstance(1, optional(dict))
False
"""
return (type_, type(None))
def expect_element(*_pos, **named):
"""
Preprocessing decorator that verifies inputs are elements of some
expected collection.
Usage
-----
>>> @expect_element(x=('a', 'b'))
... def foo(x):
... return x.upper()
...
>>> foo('a')
'A'
>>> foo('b')
'B'
>>> foo('c')
Traceback (most recent call last):
...
ValueError: foo() expected a value in ('a', 'b') for argument 'x', but got 'c' instead. # noqa
Notes
-----
This uses the `in` operator (__contains__) to make the containment check.
This allows us to use any custom container as long as the object supports
the container protocol.
"""
if _pos:
raise TypeError("expect_element() only takes keyword arguments.")
return preprocess(**valmap(_expect_element, named))
def _expect_element(collection):
template = (
"%(funcname)s() expected a value in {collection} "
"for argument '%(argname)s', but got %(actual)s instead."
).format(collection=collection)
return _mk_check(
ValueError,
template,
complement(op.contains(collection)),
repr,
)
| {
"content_hash": "933ed817ab7286bcf108f8a87d2907ee",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 104,
"avg_line_length": 27.21818181818182,
"alnum_prop": 0.56312625250501,
"repo_name": "ChinaQuants/zipline",
"id": "23001ac8fd12eb7bf09ace3715d45f1fa4d882e1",
"size": "5072",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zipline/utils/input_validation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "564"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Python",
"bytes": "1535328"
},
{
"name": "Shell",
"bytes": "4065"
}
],
"symlink_target": ""
} |
import unittest
import botocore.session
class TestService(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
def test_get_endpoint_with_no_region(self):
# Test global endpoint service such as iam.
service = self.session.get_service('iam')
endpoint = service.get_endpoint()
self.assertEqual(endpoint.host, 'https://iam.amazonaws.com/')
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "7973bdf87e06082517fbd69dc379f853",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 69,
"avg_line_length": 26,
"alnum_prop": 0.6602564102564102,
"repo_name": "jonparrott/botocore",
"id": "efe4934ca4be5cd4e0ac87f4f2743dd2486cfdaa",
"size": "1609",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/unit/test_service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "476386"
}
],
"symlink_target": ""
} |
"""
Author: AsherYang
Email : [email protected]
Date : 2018/7/5
Desc : back-office management system, update-goods handler
manager api handler
"""
import sys
sys.path.append('../')
import tornado.web
from constant import ResponseCode
from FFStoreJsonEncoder import *
from mgrsys.PermissionManager import PermissionManager
from net.GetGoods import GetGoods
from net.GetGoodsPhoto import GetGoodsPhoto
from net.GetGoodsAttr import GetGoodsAttr
from db.DbGoods import DbGoods
from db.DbGoodsPhoto import DbGoodsPhoto
from db.DbAttribute import DbAttribute
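# Illustrative JSON body for the POST handler below; the keys mirror the fields
# parsed in post(), and every value is a made-up example:
# {
#     "sign": "...", "time": "...", "tel": "...", "sms": "...",
#     "goodsid": 1, "cateid": 2, "brandid": 3, "name": "T-shirt",
#     "marketprice": "99.00", "currentprice": "79.00",
#     "salecount": 0, "stocknum": 10, "status": 1, "goodscode": "A001",
#     "goodslogo": "...", "thumlogo": "...", "keywords": "...",
#     "photosthumlist": [{"photos": "...", "thum_photo": "..."}],
#     "marketyear": "2018",
#     "sizecolorlist": [{"goodssize": "M", "goodscolor": "red"}]
# }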
class ManagerUpdateGoodsHandler(tornado.web.RequestHandler):
def post(self, *args, **kwargs):
param = self.request.body.decode('utf-8')
param = json.loads(param)
sign = param['sign']
time = param['time']
admin_tel = param['tel']
sms_pwd = param['sms']
goods_id = param['goodsid']
cate_id = param['cateid']
brand_id = param['brandid']
goods_name = param['name']
market_price = param['marketprice']
current_price = param['currentprice']
sale_count = param['salecount']
stock_num = param['stocknum']
status = param['status']
goods_code = param['goodscode']
goods_logo = param['goodslogo']
thum_logo = param['thumlogo']
keywords = param['keywords']
# goods picture
photo_thum_list = param['photosthumlist']
# goods attribute
attr_market_year = param['marketyear']
attr_size_color_list = param['sizecolorlist']
permissionMgr = PermissionManager()
baseResponse = permissionMgr.checkAdminPermissionWithLoginStatus(sign=sign, time=time,
admin_tel=admin_tel, sms_pwd=sms_pwd)
if baseResponse.code == ResponseCode.success_check_admin_permission:
getGoods = GetGoods()
getPhoto = GetGoodsPhoto()
getAttr = GetGoodsAttr()
netGoodsDetail = getGoods.getGoodsById(goods_id=goods_id)
if not netGoodsDetail:
baseResponse.code = ResponseCode.fail_goods_not_found
baseResponse.desc = ResponseCode.fail_goods_not_found_desc
else:
                # update the stored goods record
dbGoods = DbGoods()
dbGoods.goods_id = netGoodsDetail.id
if cate_id:
dbGoods.cate_id = cate_id
if brand_id:
dbGoods.brand_id = brand_id
if goods_name:
dbGoods.goods_name = goods_name
if market_price:
dbGoods.market_price = market_price
if current_price:
dbGoods.current_price = current_price
if sale_count:
dbGoods.sale_count = sale_count
if stock_num:
dbGoods.stock_num = stock_num
if status:
dbGoods.status = status
if goods_code:
dbGoods.goods_code = goods_code
if goods_logo:
dbGoods.goods_logo = goods_logo
if thum_logo:
dbGoods.thum_logo = thum_logo
if keywords:
dbGoods.keywords = keywords
# photo
dbPhotoThumList = []
if photo_thum_list:
# delete first when update
getPhoto.deleteGoodsPhotoById(goods_id=dbGoods.goods_id)
for photo_thum in photo_thum_list:
dbGoodsPhoto = DbGoodsPhoto()
dbGoodsPhoto.photo = photo_thum['photos']
dbGoodsPhoto.thum_photo = photo_thum['thum_photo']
dbGoodsPhoto.goods_id = dbGoods.goods_id
dbPhotoThumList.append(dbGoodsPhoto)
# attr
dbGoodsAttrList = []
if attr_size_color_list:
# delete first when update
getAttr.deleteGoodsAttr(dbGoods.goods_id)
for attr_size_color in attr_size_color_list:
dbGoodsAttr = DbAttribute()
attr_size = attr_size_color['goodssize']
attr_color = attr_size_color['goodscolor']
dbGoodsAttr.attr_size = attr_size
dbGoodsAttr.attr_color = attr_color
dbGoodsAttr.attr_market_year = attr_market_year
dbGoodsAttr.goods_id = dbGoods.goods_id
dbGoodsAttr.cate_id = dbGoods.cate_id
dbGoodsAttrList.append(dbGoodsAttr)
updateResult = getGoods.updateToDb(goods=dbGoods)
savePhotoResult = getPhoto.addGoodsPhotoList(dbPhotoThumList)
saveAttrResult = getAttr.addGoodsAttrList(dbGoodsAttrList)
if updateResult and savePhotoResult and saveAttrResult:
baseResponse.code = ResponseCode.op_success
baseResponse.desc = ResponseCode.op_success_desc
else:
baseResponse.code = ResponseCode.fail_op_db_data
baseResponse.desc = ResponseCode.fail_op_db_data_desc
json_str = json.dumps(baseResponse, cls=StrEncoder)
self.write(json_str)
| {
"content_hash": "ec99491f5dccd855e68ff091166b31fe",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 110,
"avg_line_length": 41.22727272727273,
"alnum_prop": 0.5525542080117604,
"repo_name": "AsherYang/ThreeLine",
"id": "8242764795eecb7294b4311c248500a2f70acf98",
"size": "5515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/ffstore/handler/ManagerUpdateGoodsHandler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "27166"
},
{
"name": "HTML",
"bytes": "674"
},
{
"name": "Java",
"bytes": "191029"
},
{
"name": "Python",
"bytes": "326761"
}
],
"symlink_target": ""
} |
from django.db.models import Manager, Sum
from django.db.models.query import RawQuerySet
from kolibri.content.models import AssessmentMetaData, ChannelMetadataCache, ContentNode, File
from le_utils.constants import content_kinds
from rest_framework import serializers
from .content_db_router import default_database_is_attached, get_active_content_database
class ChannelMetadataCacheSerializer(serializers.ModelSerializer):
class Meta:
model = ChannelMetadataCache
fields = ('root_pk', 'id', 'name', 'description', 'author', 'last_updated')
class FileSerializer(serializers.ModelSerializer):
storage_url = serializers.SerializerMethodField()
preset = serializers.SerializerMethodField()
download_url = serializers.SerializerMethodField()
def get_storage_url(self, target_node):
return target_node.get_storage_url()
def get_preset(self, target_node):
return target_node.get_preset()
def get_download_url(self, target_node):
return target_node.get_download_url()
class Meta:
model = File
fields = ('storage_url', 'id', 'priority', 'checksum', 'available', 'file_size', 'extension', 'preset', 'lang',
'supplementary', 'thumbnail', 'download_url')
class AssessmentMetaDataSerializer(serializers.ModelSerializer):
assessment_item_ids = serializers.JSONField(default='[]')
mastery_model = serializers.JSONField(default='{}')
class Meta:
model = AssessmentMetaData
fields = ('assessment_item_ids', 'number_of_assessments', 'mastery_model', 'randomize', 'is_manipulable', )
def get_summary_logs(content_ids, user):
from kolibri.logger.models import ContentSummaryLog
if not content_ids:
return ContentSummaryLog.objects.none()
# get all summary logs for the current user that correspond to the descendant content nodes
if default_database_is_attached(): # if possible, do a direct join between the content and default databases
channel_alias = get_active_content_database()
return ContentSummaryLog.objects.using(channel_alias).filter(user=user, content_id__in=content_ids)
else: # otherwise, convert the leaf queryset into a flat list of ids and use that
return ContentSummaryLog.objects.filter(user=user, content_id__in=list(content_ids))
def get_topic_progress_fraction(topic, user):
leaf_ids = topic.get_descendants(include_self=False).order_by().exclude(
kind=content_kinds.TOPIC).values_list("content_id", flat=True)
return round(
(get_summary_logs(leaf_ids, user).aggregate(Sum('progress'))['progress__sum'] or 0)/(len(leaf_ids) or 1),
4
)
def get_content_progress_fraction(content, user):
from kolibri.logger.models import ContentSummaryLog
try:
# add up all the progress for the logs, and divide by the total number of content nodes to get overall progress
overall_progress = ContentSummaryLog.objects.get(user=user, content_id=content.content_id).progress
except ContentSummaryLog.DoesNotExist:
return None
return round(overall_progress, 4)
def get_topic_and_content_progress_fraction(node, user):
if node.kind == content_kinds.TOPIC:
return get_topic_progress_fraction(node, user)
else:
return get_content_progress_fraction(node, user)
def get_topic_and_content_progress_fractions(nodes, user):
leaf_ids = nodes.get_descendants(include_self=True).order_by().exclude(
kind=content_kinds.TOPIC).values_list("content_id", flat=True)
summary_logs = get_summary_logs(leaf_ids, user)
overall_progress = {log['content_id']: round(log['progress'], 4) for log in summary_logs.values('content_id', 'progress')}
for node in nodes:
if node.kind == content_kinds.TOPIC:
leaf_ids = node.get_descendants(include_self=True).order_by().exclude(
kind=content_kinds.TOPIC).values_list("content_id", flat=True)
overall_progress[node.content_id] = round(
sum(overall_progress.get(leaf_id, 0) for leaf_id in leaf_ids)/len(leaf_ids),
4
)
return overall_progress
def get_content_progress_fractions(nodes, user):
if isinstance(nodes, RawQuerySet) or isinstance(nodes, list):
leaf_ids = [datum.content_id for datum in nodes]
else:
leaf_ids = nodes.exclude(kind=content_kinds.TOPIC).values_list("content_id", flat=True)
summary_logs = get_summary_logs(leaf_ids, user)
# make a lookup dict for all logs to allow mapping from content_id to current progress
overall_progress = {log['content_id']: round(log['progress'], 4) for log in summary_logs.values('content_id', 'progress')}
return overall_progress
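# Example of the mapping returned by the progress helpers above
# (content ids and values are hypothetical):
#     {'1fa2...': 0.75, '9bc3...': 1.0}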
class ContentNodeListSerializer(serializers.ListSerializer):
def to_representation(self, data):
if not data:
return data
if 'request' not in self.context or not self.context['request'].user.is_facility_user:
progress_dict = {}
else:
user = self.context["request"].user
# Don't annotate topic progress as too expensive
progress_dict = get_content_progress_fractions(data, user)
# Dealing with nested relationships, data can be a Manager,
# so, first get a queryset from the Manager if needed
iterable = data.all() if isinstance(data, Manager) else data
return [
self.child.to_representation(
item,
progress_fraction=progress_dict.get(item.content_id),
annotate_progress_fraction=False
) for item in iterable
]
class ContentNodeSerializer(serializers.ModelSerializer):
parent = serializers.PrimaryKeyRelatedField(read_only=True)
files = FileSerializer(many=True, read_only=True)
assessmentmetadata = AssessmentMetaDataSerializer(read_only=True, allow_null=True, many=True)
license = serializers.StringRelatedField(many=False)
license_description = serializers.SerializerMethodField()
def __init__(self, *args, **kwargs):
# Instantiate the superclass normally
super(ContentNodeSerializer, self).__init__(*args, **kwargs)
# enable dynamic fields specification!
if 'request' in self.context and self.context['request'].GET.get('fields', None):
fields = self.context['request'].GET['fields'].split(',')
# Drop any fields that are not specified in the `fields` argument.
allowed = set(fields)
existing = set(self.fields.keys())
for field_name in existing - allowed:
self.fields.pop(field_name)
def to_representation(self, instance, progress_fraction=None, annotate_progress_fraction=True):
if progress_fraction is None and annotate_progress_fraction:
if 'request' not in self.context or not self.context['request'].user.is_facility_user:
# Don't try to annotate for a non facility user
progress_fraction = 0.0
else:
user = self.context["request"].user
progress_fraction = get_content_progress_fraction(instance, user)
value = super(ContentNodeSerializer, self).to_representation(instance)
value['progress_fraction'] = progress_fraction
return value
def get_license_description(self, target_node):
if target_node.license_id:
return target_node.license.license_description
return ''
class Meta:
model = ContentNode
fields = (
'pk', 'content_id', 'title', 'description', 'kind', 'available', 'sort_order', 'license_owner',
'license', 'license_description', 'files', 'parent', 'author',
'assessmentmetadata',
)
list_serializer_class = ContentNodeListSerializer
class ContentNodeProgressListSerializer(serializers.ListSerializer):
def to_representation(self, data):
if not data:
return data
if 'request' not in self.context or not self.context['request'].user.is_facility_user:
progress_dict = {}
else:
user = self.context["request"].user
# Don't annotate topic progress as too expensive
progress_dict = get_topic_and_content_progress_fractions(data, user)
# Dealing with nested relationships, data can be a Manager,
# so, first get a queryset from the Manager if needed
iterable = data.all() if isinstance(data, Manager) else data
return [
self.child.to_representation(
item,
progress_fraction=progress_dict.get(item.content_id, 0.0),
annotate_progress_fraction=False
) for item in iterable
]
class ContentNodeProgressSerializer(serializers.Serializer):
def to_representation(self, instance, progress_fraction=None, annotate_progress_fraction=True):
if progress_fraction is None and annotate_progress_fraction:
if 'request' not in self.context or not self.context['request'].user.is_facility_user:
# Don't try to annotate for a non facility user
progress_fraction = 0
else:
user = self.context["request"].user
progress_fraction = get_topic_and_content_progress_fraction(instance, user) or 0.0
return {
'pk': instance.pk,
'progress_fraction': progress_fraction,
}
class Meta:
list_serializer_class = ContentNodeProgressListSerializer
| {
"content_hash": "19ab5ba66350a9b42ebadf30e171e56f",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 126,
"avg_line_length": 41.791304347826085,
"alnum_prop": 0.6657303370786517,
"repo_name": "rtibbles/kolibri",
"id": "6dec239b7051a56f3df7a39f9ffa55f9b3849a74",
"size": "9612",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kolibri/content/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "27623"
},
{
"name": "HTML",
"bytes": "4406"
},
{
"name": "JavaScript",
"bytes": "510659"
},
{
"name": "Makefile",
"bytes": "3914"
},
{
"name": "Python",
"bytes": "664765"
},
{
"name": "Shell",
"bytes": "10337"
},
{
"name": "Vue",
"bytes": "481473"
}
],
"symlink_target": ""
} |
"""
convert.py
Convert music from command line. Place music in music directory (or other
configured directory) and then run this script.
"""
import os
from lib.config import config as cf
from lib.sclog import sclog
import lib.utils as utils
clear = cf.flags.get('clear')
def main(clear=clear):
"""
Converts source music to formatted wavs, and converts wavs to frequency
timestep npy files.
  Runs utils.convert_source_dir() and utils.convert_wavs_to_freqs() from the
command line.
Arguments:
bool:clear (cf.flags.clear) -- Remove existing files from data directory
"""
if clear:
for file in os.listdir(cf.data.target_dir):
if file[0] == '.':
continue
utils.safe_remove(os.path.join(cf.data.target_dir, file))
# Convert source dir
k = utils.convert_source_dir()
sclog('Converted {0} source files to formatted wavs.'.format(k))
# Convert wavs to freq timesteps
k = utils.convert_wavs_to_freqs()
sclog('Converted {0} wavs to frequency timesteps.'.format(k))
print('Done converting {0} files.'.format(k))
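# Typical invocation (a sketch; directories and flags come from lib.config):
#     python convert.py      # convert configured sources, then wavs -> .npy
# Setting the 'clear' flag in the config empties cf.data.target_dir first.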
if (__name__ == '__main__'):
main() | {
"content_hash": "3cfc9023bf5f7dcaeff1136dece6e849",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 76,
"avg_line_length": 26.804878048780488,
"alnum_prop": 0.6915377616014559,
"repo_name": "hexagrahamaton/soundcube",
"id": "12c86e273d6e08023baa92e4ab0d4eb66426773d",
"size": "1122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "convert.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63978"
}
],
"symlink_target": ""
} |
from cloudbaseinit import exception
from cloudbaseinit.openstack.common import log as logging
from cloudbaseinit.osutils import factory as osutils_factory
from cloudbaseinit.plugins.common import base
from cloudbaseinit.plugins.common import constants
from cloudbaseinit.utils.windows import security
from cloudbaseinit.utils.windows import winrmconfig
from cloudbaseinit.utils.windows import x509
LOG = logging.getLogger(__name__)
class ConfigWinRMCertificateAuthPlugin(base.BasePlugin):
def _get_credentials(self, shared_data):
user_name = shared_data.get(constants.SHARED_DATA_USERNAME)
if not user_name:
raise exception.CloudbaseInitException(
"Cannot execute plugin as the username has not been set in "
"the plugins shared data")
password = shared_data.get(constants.SHARED_DATA_PASSWORD)
if not password:
raise exception.CloudbaseInitException(
"Cannot execute plugin as the password has not been set in the"
" plugins shared data")
# For security reasons unset the password in the shared_data
# as it is currently not needed by other plugins
shared_data[constants.SHARED_DATA_PASSWORD] = None
return (user_name, password)
def execute(self, service, shared_data):
user_name, password = self._get_credentials(shared_data)
certs_data = service.get_client_auth_certs()
if not certs_data:
LOG.info("WinRM certificate authentication cannot be configured "
"as a certificate has not been provided in the metadata")
return (base.PLUGIN_EXECUTION_DONE, False)
osutils = osutils_factory.get_os_utils()
security_utils = security.WindowsSecurityUtils()
# On Windows Vista, 2008, 2008 R2 and 7, changing the configuration of
# the winrm service will fail with an "Access is denied" error if the
# User Account Control remote restrictions are enabled.
# The solution to this issue is to temporarily disable the User Account
# Control remote restrictions.
# https://support.microsoft.com/kb/951016
disable_uac_remote_restrictions = (osutils.check_os_version(6, 0) and
not osutils.check_os_version(6, 2)
and security_utils
.get_uac_remote_restrictions())
try:
if disable_uac_remote_restrictions:
LOG.debug("Disabling UAC remote restrictions")
security_utils.set_uac_remote_restrictions(enable=False)
winrm_config = winrmconfig.WinRMConfig()
winrm_config.set_auth_config(certificate=True)
for cert_data in certs_data:
cert_manager = x509.CryptoAPICertManager()
cert_thumprint, cert_upn = cert_manager.import_cert(
cert_data, store_name=x509.STORE_NAME_ROOT)
if not cert_upn:
LOG.error("WinRM certificate authentication cannot be "
"configured as the provided certificate lacks a "
"subject alt name containing an UPN (OID "
"1.3.6.1.4.1.311.20.2.3)")
continue
if winrm_config.get_cert_mapping(cert_thumprint, cert_upn):
winrm_config.delete_cert_mapping(cert_thumprint, cert_upn)
LOG.info("Creating WinRM certificate mapping for user "
"%(user_name)s with UPN %(cert_upn)s",
{'user_name': user_name, 'cert_upn': cert_upn})
winrm_config.create_cert_mapping(cert_thumprint, cert_upn,
user_name, password)
finally:
if disable_uac_remote_restrictions:
LOG.debug("Enabling UAC remote restrictions")
security_utils.set_uac_remote_restrictions(enable=True)
return (base.PLUGIN_EXECUTION_DONE, False)
| {
"content_hash": "b5654300e062b84397571ffa10644503",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 79,
"avg_line_length": 45.06521739130435,
"alnum_prop": 0.6114327062228654,
"repo_name": "whiteear/cloudbase-init",
"id": "9211abc61a66703c9ac307829fd8707b87c358ac",
"size": "4762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudbaseinit/plugins/windows/winrmcertificateauth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1034309"
}
],
"symlink_target": ""
} |
from lxml import html
from django.core.management.base import BaseCommand, CommandError
from engine.models import WebPage
from engine.crawler import get_page_links
class Command(BaseCommand):
def handle(self, *args, **options):
pages = WebPage.objects.only(*['url', 'raw_html']).filter(
crawled=True, status__startswith=2)[0:50]
print(len(pages))
i = 0
for page in pages:
i += 1
print('page: %d' % i)
doc = html.fromstring(str.encode(page.raw_html))
out_links = get_page_links(page.url, doc)
print(out_links)
| {
"content_hash": "baf57f4b4914a562bef4f5a14dcfc7d7",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 66,
"avg_line_length": 25.916666666666668,
"alnum_prop": 0.6077170418006431,
"repo_name": "tanguy-s/ucl-search-engine",
"id": "4edbe0ca93a4bf89a3890d6a0029d8d41bb5589d",
"size": "622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "engine/management/commands/build_graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106515"
},
{
"name": "Shell",
"bytes": "10385"
}
],
"symlink_target": ""
} |
import os
import re
import typing
class Styles(object):
Normal = 'Normal'
Heading = 'Heading'
Heading1 = 'Heading1'
Heading2 = 'Heading2'
Diagram = 'Diagram'
Metadata = 'Metadata'
def parse(source):
lines = source.splitlines()
states = []
state = StartState()
for line in lines:
new_state = state.add(line)
if new_state is not state:
states.append(new_state)
state = new_state
links = {}
unlinked_states = []
last_linked = None
for s in states:
try:
name, address = s.get_link()
except AttributeError:
unlinked_states.append(s)
last_linked = s
continue
last_linked.raw_text += s.raw_text
links[name] = address
printed_states = []
last_printed = None
for s in unlinked_states:
if not s.is_printed():
last_printed.raw_text += s.raw_text
continue
s.text = replace_links(s.text, links)
s.text = re.sub(r'\*\*(.*?)\*\*', r'<b>\1</b>', s.text)
printed_states.append(s)
last_printed = s
return printed_states
def replace_links(text, links):
replacement = ''
index = 0
for match in re.finditer(r'\[([^]]+)]\[([^]]+)]', text):
block = text[index:match.start()]
replacement += block
link_name = match.group(2)
link = links[link_name]
replacement += '<a href="%s">%s</a>' % (link, match.group(1))
index = match.end()
if replacement:
block = text[index:]
if block.startswith(' '):
block = ' ' + block[1:]
replacement += block
return replacement or text
class ParsingState(object):
def __init__(self,
text: str = None,
style: str = Styles.Normal,
bullet: str = None,
raw_text: str = None):
self.text = text
if raw_text is None:
raw_text = text
self.raw_text = '' if raw_text is None else raw_text + os.linesep
self.style = style
self.bullet = bullet
self.image_path = None
def add(self, line):
if line.startswith(' '):
return DiagramState('').add(line)
if line == '---':
return MetadataState(line)
match = re.match(r'^\[([^]]+)]:\s*(.*)$', line)
if match:
link_name = match.group(1)
address = match.group(2)
return LinkState(link_name, address, line)
match = re.match(r'^(#+)\s*(.*?)\s*#*$', line)
if match:
level = len(match.group(1))
heading_text = match.group(2)
return ParsingState(heading_text,
Styles.Heading + str(level),
raw_text=line)
match = re.match(r'^((\*)|(\d+)\.)\s+(.*)$', line)
if match:
bullet = match.group(2) or match.group(3)
text = match.group(4)
return BulletedState(text, bullet=bullet, raw_text=line)
if line:
return ParagraphState(line)
self.raw_text += line + os.linesep
return self
def is_printed(self):
return True
def __repr__(self):
return 'ParsingState({!r}, {!r}, {!r})'.format(self.text,
self.style,
self.bullet)
def __eq__(self, other):
return (self.text == other.text and
self.style == other.style and
self.bullet == other.bullet)
def write_markdown(self, markdown_file: typing.TextIO):
""" Write the markdown for this state.
:param markdown_file: the destination to write the markdown to
"""
markdown_file.write(self.raw_text)
class StartState(ParsingState):
def is_printed(self):
return False
def __repr__(self):
return 'StartState()'
class MetadataState(ParsingState):
def __init__(self, text=None):
super().__init__(text, Styles.Metadata)
def is_printed(self):
return True
def __repr__(self):
return f'MetadataState({self.text!r})'
def add(self, line):
self.raw_text += line + os.linesep
if line == '---':
return StartState()
match = re.match('title: *', line)
if match is not None:
self.text = line[match.end():]
return self
class ParagraphState(ParsingState):
def add(self, line):
self.raw_text += line + os.linesep
if line:
self.text = self.text + ' ' + line
return self
return StartState()
def __repr__(self):
return 'ParagraphState({!r})'.format(self.text)
class BulletedState(ParsingState):
def add(self, line):
if not line.startswith(' '):
return StartState().add(line)
self.raw_text += line + os.linesep
self.text = self.text + ' ' + line.strip()
return self
def __repr__(self):
return 'BulletedState({!r}, bullet={!r})'.format(self.text,
self.bullet)
class LinkState(ParsingState):
def __init__(self, name: str, address: str, raw_text: str):
super().__init__(raw_text=raw_text)
self.name = name
self.address = address
def get_link(self):
return self.name, self.address
def is_printed(self):
return False
def __repr__(self):
return 'LinkState({!r}, {!r})'.format(self.name, self.address)
class DiagramState(ParsingState):
def __init__(self, line):
super(DiagramState, self).__init__(line, Styles.Diagram)
def add(self, line):
if line.startswith(' '):
self.text = self.text + line[4:] + '\n'
return self
return StartState().add(line)
def write_markdown(self, markdown_file: typing.TextIO):
print(f'', file=markdown_file)
print(file=markdown_file)
def __repr__(self):
return 'DiagramState({!r})'.format(self.text)
| {
"content_hash": "01d3f13b44dbe179a84d602210574bd6",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 73,
"avg_line_length": 28.685185185185187,
"alnum_prop": 0.5237249838605552,
"repo_name": "donkirkby/domiculture",
"id": "a4a0a97bd084af06c215aaf98d574f2c037d4682",
"size": "6196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "book_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "94468"
}
],
"symlink_target": ""
} |
from threading import Thread
import time
class ThreadFirst(Thread):
def run(self):
for i in range(5):
time.sleep(1)
print('hi')
ThreadFirst().start()
| {
"content_hash": "2e1a8dbc914392d43aa3d0479a197891",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 28,
"avg_line_length": 17.181818181818183,
"alnum_prop": 0.5925925925925926,
"repo_name": "int19h/PTVS",
"id": "7d1d0d8a374748439e8d07d6a69c9bf0a154cc78",
"size": "189",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Python/Tests/TestData/DebuggerProject/BreakpointMainThreadExited.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP.NET",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "7975"
},
{
"name": "C",
"bytes": "21444"
},
{
"name": "C#",
"bytes": "11297254"
},
{
"name": "C++",
"bytes": "175131"
},
{
"name": "CSS",
"bytes": "4109"
},
{
"name": "HTML",
"bytes": "213660"
},
{
"name": "JavaScript",
"bytes": "44401"
},
{
"name": "PowerShell",
"bytes": "18157"
},
{
"name": "Pug",
"bytes": "2807"
},
{
"name": "Python",
"bytes": "620501"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "3663"
},
{
"name": "Tcl",
"bytes": "24968"
},
{
"name": "Vim Snippet",
"bytes": "17303"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from tkinter import filedialog
from tkinter import *
from astride import Streak
import glob
import sys
import shutil
import os
import tkinter as tk
import matplotlib.pyplot as plt
from astropy.io import fits
import numpy as np
class _Args:
file_pathin = ""
file_pathout = ""
shape = 0.14
area = 120
contour = 12
diff = False
v = False
start_frame = -1
end_frame = -1
def get_arg(argv):
arguments = _Args()
if len(argv) == 1:
return get_int_arg(arguments)
else:
return get_cmd_arg(argv, arguments)
def mk_diff(f0,f1,diff, v):
hdu0 = fits.open(f0, ignore_missing_end=True)
hdu1 = fits.open(f1, ignore_missing_end=True)
h1 = hdu1[0].header
d0 = hdu0[0].data
d1 = hdu1[0].data
if v:
print("DEBUG mean/std: %s %s %s %g %g" % (f0,f1,diff,d0.mean(),d0.std()))
d2 = d1-d0
fits.writeto(diff,d2,h1,overwrite=True)
def get_cmd_arg(argv, arguments):
import argparse as ap
parser = ap.ArgumentParser()
parser.add_argument('-i','--filein', nargs=1,help = 'Directory to input fits directory')
parser.add_argument('-o','--fileout', nargs=1,help = 'Directory to output folder')
parser.add_argument('-s','--shape', nargs=1,help = 'Shape factor')
parser.add_argument('-a','--area', nargs=1,help = 'Minimum area to be considered a streak')
parser.add_argument('-c','--contour',nargs=1,help = 'Control value')
parser.add_argument('-d','--difference',action = 'store_const',const = arguments.diff , help = 'Create difference images')
parser.add_argument('-v','--verbose', action = 'store_const', const = arguments.v, help = 'Verbose')
parser.add_argument('-S','--start',nargs = 1, help = 'Start Frame (starts at 1)')
parser.add_argument('-E','--end', nargs = 1, help = 'End Frame')
args=vars(parser.parse_args())
if args['filein'] != None: arguments.file_pathin = (args['filein'][0])
if args['fileout'] != None: arguments.file_pathout = (args['fileout'][0])
else:
if arguments.file_pathin.endswith("/"):
arguments.file_pathout = arguments.file_pathin[0:len(arguments.file_pathin) -1] + "-output"
else:
arguments.file_pathout = arguments.file_pathin +"-output"
if args['shape'] != None: arguments.shape = float(args['shape'][0])
if args['area'] != None: arguments.area = float(args['area'][0])
if args['contour'] != None: arguments.contour = float(args['contour'][0])
if args['difference'] != None: arguments.diff = True
if args['verbose'] != None: arguments.v = True
if args['start'] != None: arguments.start_frame = int(args['start'][0])
if args['end'] != None: arguments.end_frame = int(args['end'][0])
return arguments
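# Illustrative command line for the options defined above (paths and frame
# numbers are examples only):
#     python try_astride.py -i 20151108_MD01_raw -o 20151108_MD01_out \
#         -s 0.14 -a 120 -c 12 -d -S 1 -E 100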
def get_int_arg(arguments):
#Creates folder input browsers
winin = tk.Tk()
winin.withdraw()
winin.attributes('-topmost', True)
arguments.file_pathin = filedialog.askdirectory(title = "Select input")
#Creates folder output browsers
winout = tk.Tk()
winout.withdraw()
winout.attributes('-topmost', True)
arguments.file_pathout = filedialog.askdirectory(title = "Select output")
winout.destroy()
winin.destroy()
top = tk.Tk()
nshape = tk.StringVar()
narea = tk.StringVar()
ncontour = tk.StringVar()
nstart_frame = tk.StringVar()
nend_frame = tk.StringVar()
ndiff = tk.IntVar()
nv = tk.IntVar()
L1 = Label(top, text="Shape value (1=circle, .1=thin oval) (default = 0.14): ")
L1.pack()
eshape = Entry(top, textvariable=nshape)
#nshape = float(nshape.get())
eshape.pack()
L2 = Label(top, text="Minimum area (default = 120): ")
L2.pack()
earea = Entry(top, textvariable=narea)
#narea = float(narea.get())
earea.pack()
L3 = Label(top, text="Contour value (higher=only brighter streaks detected)(default = 12): ")
L3.pack()
econtour = Entry(top, textvariable=ncontour)
#ncontour = float(ncontour.get())
econtour.pack()
L4 = Label(top, text="Frame at which to start (default = 1)")
L4.pack()
estart_frame = Entry(top, textvariable=nstart_frame)
#nstart_frame = float(nstart_frame.get())
estart_frame.pack()
L5 = Label(top, text="Last frame (does not process last frame) (default goes to end)")
L5.pack()
eend_frame = Entry(top, textvariable=nend_frame)
#nend_frame = float(nend_frame.get())
eend_frame.pack()
C1 = Checkbutton(top, text = "Difference imaging (default = false)", variable = ndiff, \
onvalue=1, offvalue=0 )
C2 = Checkbutton(top, text = "Verbose mode (default = false)", variable = nv, \
onvalue = 1, offvalue = 0 )
def save(nshape, narea, ncontour, nstart_frame, nend_frame, ndiff, nv):
if len(nshape.get()) != 0:
arguments.shape = float(nshape.get())
if len(narea.get()) != 0:
arguments.area = float(narea.get())
if len(ncontour.get()) != 0:
arguments.contour = float(ncontour.get())
if len(nstart_frame.get()) != 0:
arguments.start_frame = int(nstart_frame.get())
if len(nend_frame.get()) != 0:
arguments.end_frame = int(nend_frame.get())
arguments.diff = ndiff.get()
arguments.v = nv.get()
top.destroy()
s = Button(top, text="Save Values", command=lambda: save(nshape, narea, ncontour, nstart_frame, nend_frame, ndiff, nv))
C1.pack()
C2.pack()
s.pack()
top.mainloop()
return(arguments)
def do_dir(arguments):
"""
    Process every FITS file found in arguments.file_pathin, writing results
    under arguments.file_pathout.
"""
#print("Outputting in directory: " + dsum)
if not os.path.exists(arguments.file_pathout):
os.mkdir(arguments.file_pathout)
num = 0
detected = 0
fileCount = 0
zero_image = 0
bad_image = 0
bad_image_paths = []
# debug/verbose
if arguments.v:
print('DEBUG: shape=%g area=%g contour=%g' % (arguments.shape,arguments.area,arguments.contour))
ffs = glob.glob(arguments.file_pathin+'/*.FIT') + glob.glob(arguments.file_pathin+'/*.fit') + \
glob.glob(arguments.file_pathin+'/*.FTS') + glob.glob(arguments.file_pathin+'/*.fts') + \
glob.glob(arguments.file_pathin+'/*.FITS') + glob.glob(arguments.file_pathin+'/*.fits')
ffs = list(set(ffs)) # needed for dos
ffs.sort() # on linux wasn't sorted, on dos it was
f = open(arguments.file_pathout+'/summary.txt','w') # Creates summary text file
f.write('Streaks found in files: \n') #Creates first line for summary file
sf = arguments.start_frame
ef = arguments.end_frame
if sf <= 0:
sf = 1
if ef <= 0 or ef > len(ffs):
ef = len(ffs)
if ef < sf:
temp = sf
sf = ef
ef = temp
print('Processing %d files from %d to %d' % ((ef-sf+1), sf, ef))
for ff in ffs[sf-1:ef]:
# creates directory one directory back from the folder which contains fits files
num = do_one(ff,arguments.file_pathout+'/'+ff[ff.rfind(os.sep)+1:ff.rfind('.')],arguments.shape,arguments.area,arguments.contour)
if num == 0:
zero_image += 1
elif num < 0:
bad_image += 1
bad_image_paths.append(ff)
else:
detected += int(num) #Counter of how many streaks detected
f.write(ff + '\n')
fileCount += 1 #Counter for how many files analyzed
print("\n")
# Produce and write summary file
f.write('\n' 'Files analyzed: ' + str(fileCount)+ '\n' )
f.write('Streaks detected: ' + str(detected) + '\n' )
f.write('Files with no detections: ' + str(zero_image) + '\n')
f.write('Bad files: ' + str(bad_image)+ '\n')
temp_string = "\n"
temp_string = temp_string.join(bad_image_paths)
f.write(temp_string)
f.write('\n\n')
if arguments.diff:
f.write('Streaks found in Files: \n')
num = 0
detected = 0
fileCount = 0
zero_image = 0
bad_image = 0
bad_image_paths = []
dfs = []
# print('Computing %d differences' % (ef-sf+1))
for i in range(len(ffs)-1):
dfs.append(arguments.file_pathout+'/'+ffs[i+1][len(arguments.file_pathin):]+'DIFF')
# mk_diff(ffs[i],ffs[i+1],dfs[i],v)
if sf <= 0:
sf = 1
if ef <= 0 or ef > len(dfs):
ef = len(dfs)
if ef <= sf:
temp = sf
sf = ef
ef = temp
print('Processing %d files from %d to %d' % ((ef-sf+1), sf, ef))
i = sf-1
for df in dfs[sf-1:ef]:
try:
mk_diff(ffs[i],ffs[i+1],dfs[i],arguments.v)
# num = do_one(df,dsum+'/'+df[df.rfind(os.sep)+1:df.rfind('.')],shape,area,contour)
#diff_file = dsum+'/'+df[df.rfind(os.sep)+1:df.find('.')]+'DIFF'
#directory one directory back
new_dir = arguments.file_pathout+'/'+df[df.rfind(os.sep)+1:df.rfind('.')]+'DIFF'
num = do_one(df,new_dir,arguments.shape,arguments.area,arguments.contour)
os.remove(df)
except:
num=-1
sys.stdout.write('X')
if num == 0:
zero_image += 1
elif num < 0:
bad_image += 1
bad_image_paths.append(df)
else:
detected += int(num) #Counter of how many streaks detected
f.write(df + '\n')
fileCount += 1 #Counter for how many files analyzed
i += 1
print("\n")
# Produce and write summary file
f.write('\n' 'Files analyzed: ' + str(fileCount)+ '\n' )
f.write('Streaks detected: ' + str(detected) + '\n' )
f.write('Files with no detections: ' + str(zero_image) + '\n')
f.write('Bad files: ' + str(bad_image)+ '\n')
temp_string = "\n"
temp_string = temp_string.join(bad_image_paths)
f.write(temp_string)
f.close()
else:
f.close()
def do_one(ff,output_path,shape,area,contour):
"""
    Process one FITS file (ff), writing any detections under output_path.
"""
try:
# Read a fits image and create a Streak instance.
streak = Streak(ff,output_path=output_path)
# Detect streaks.
# streak.shape_cut = .14
# streak.area_cut = 120
# streak.contour_threshold = 12
# Customization of values
streak.shape_cut = shape
streak.area_cut = area
streak.contour_threshold = contour
streak.detect()
n = len(streak.streaks)
except:
n = -1
if n > 0:
# Write outputs and plot figures.
streak.write_outputs()
streak.plot_figures()
if n == 0:
sys.stdout.write('.')
elif n < 0:
sys.stdout.write('X')
elif n < 10:
sys.stdout.write('%d' % n)
else:
sys.stdout.write('*')
sys.stdout.flush()
return n
# Backup copy of an earlier do_one() implementation, kept for reference only
# and fully commented out so it stays inert:
#def do_one(ff,output_path=None,shape=None,area=None,contour=None): BACKUP
#    """
#    process a directory one fits-file (ff)
#    """
#    # Read a fits image and create a Streak instance.
#    streak = Streak(ff,output_path=output_path)
#    # Detect streaks.
#    # streak.shape_cut = .14
#    # streak.area_cut = 120
#    # streak.contour_threshold = 12
#    #Customization of values
#    streak.shape_cut = shape
#    streak.area_cut = area
#    streak.contour_threshold = contour
#    streak.detect()
#    n = len(streak.streaks)
#    # Write outputs and plot figures.
#    streak.write_outputs()
#    streak.plot_figures()
#    streakfile=output_path+"/streaks.txt"
#    fp=open(streakfile)
#    lines=fp.readlines()
#    fp.close()
#    #print("streaks found %d" % (len(lines)-1))
#    #print("%d " % (len(lines)-1))
#    n = len(lines)-1
#    if n == 0:
#        sys.stdout.write('.')
#    elif n < 10:
#        sys.stdout.write('%d' % n)
#    else:
#        sys.stdout.write('*')
#    sys.stdout.flush()
#    #Delete/move files
#    if n == 0:
#        shutil.rmtree(output_path)
#    return int(n)
#do_one('20151108_MD01_raw/IMG00681.FIT')
#do_dir('20151108_MD01_raw')
if __name__ == '__main__':
try:
arguments = get_arg(sys.argv)
except:
print("An error occored getting the arguments for the function\n")
sys.exit(0)
#Prints selected folders
print("Running in data directory %s" % arguments.file_pathin)
print("Outputting in data directory %s" % arguments.file_pathout)
do_dir(arguments)
#print("Running in data directory %s" % sys.argv[1])
#do_dir(sys.argv[1],sys.argv[2])
| {
"content_hash": "9a759dc13fcf82c99ded8e6eebd82f90",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 137,
"avg_line_length": 30.91725768321513,
"alnum_prop": 0.55918336137024,
"repo_name": "teuben/pyASC",
"id": "8ff50fc4c9c10a9f186ec3fc9b7e6e8415ce5732",
"size": "13140",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/try_astride.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1771159"
},
{
"name": "C++",
"bytes": "20438"
},
{
"name": "CSS",
"bytes": "190920"
},
{
"name": "HTML",
"bytes": "944350"
},
{
"name": "Hack",
"bytes": "6209"
},
{
"name": "JavaScript",
"bytes": "11521045"
},
{
"name": "M4",
"bytes": "7935"
},
{
"name": "Makefile",
"bytes": "59832"
},
{
"name": "PHP",
"bytes": "4255"
},
{
"name": "Python",
"bytes": "92678"
},
{
"name": "Shell",
"bytes": "106899"
},
{
"name": "Yacc",
"bytes": "2607"
},
{
"name": "sed",
"bytes": "506"
}
],
"symlink_target": ""
} |
import numpy as np
from .TimeField import TimeField
class TimeMassField(TimeField):
def __add__(self, other):
if not (isinstance(other, self.__class__)):
raise (
Exception(
"The two operands must be both children of " + str(self__class__)
)
)
arr = self.arr + other.arr
obj = self.__new__(self.__class__)
obj.__init__(arr, self.tss)
return obj
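# Sketch of intended use (field_a and field_b are hypothetical TimeMassField
# instances built on the same time-step set `tss`):
#     combined = field_a + field_b    # adds the underlying .arr data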
| {
"content_hash": "ff0087c639ff82fc74d4a6748d8e8ea2",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 85,
"avg_line_length": 29.125,
"alnum_prop": 0.5085836909871244,
"repo_name": "MPIBGC-TEE/CompartmentalSystems",
"id": "5f23b15542f73e1d255aa4b5ddcc421f7f00dc75",
"size": "506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/CompartmentalSystems/bins/TimeMassField.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "895"
},
{
"name": "HTML",
"bytes": "35548556"
},
{
"name": "Jupyter Notebook",
"bytes": "131659124"
},
{
"name": "Makefile",
"bytes": "8783"
},
{
"name": "Python",
"bytes": "1119047"
},
{
"name": "Shell",
"bytes": "2348"
}
],
"symlink_target": ""
} |
from utils import load_mat
import pylab as pl
import numpy as np
from sklearn.cluster import KMeans
data = load_mat('heightWeight')
data = data['heightWeightData']
markers = 'Dox'
colors = 'rgb'
for i in range(3):
KM_model = KMeans(init='k-means++', n_clusters=i+1)
labels = KM_model.fit_predict(data[:, [1, 2]])
labels_unique = np.unique(labels)
fig = pl.figure(i)
for j in range(len(labels_unique)):
data_chosen = data[labels == labels_unique[j]]
pl.scatter(data_chosen[:, 1], data_chosen[:, 2],
marker=markers[j],
color=colors[j])
pl.title('k = %s' % (i+1))
pl.savefig('kmeansHeightWeight_%s.png' % (i+1))
pl.show()
| {
"content_hash": "14c15d12103165e892933cb57d2daec0",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 56,
"avg_line_length": 30.565217391304348,
"alnum_prop": 0.6073968705547653,
"repo_name": "shangliy/ee660",
"id": "5a8515150880e617e915243bf4737ba788ce9011",
"size": "726",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "python/demos/ch01/kmeansHeightWeight.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7515"
},
{
"name": "C",
"bytes": "654077"
},
{
"name": "C++",
"bytes": "241202"
},
{
"name": "CSS",
"bytes": "16501"
},
{
"name": "FORTRAN",
"bytes": "237793"
},
{
"name": "Groff",
"bytes": "11025"
},
{
"name": "HTML",
"bytes": "452787"
},
{
"name": "JavaScript",
"bytes": "54587"
},
{
"name": "Limbo",
"bytes": "18665"
},
{
"name": "M",
"bytes": "60278"
},
{
"name": "Makefile",
"bytes": "11329"
},
{
"name": "Mathematica",
"bytes": "518"
},
{
"name": "Matlab",
"bytes": "7867112"
},
{
"name": "Mercury",
"bytes": "435"
},
{
"name": "Objective-C",
"bytes": "502"
},
{
"name": "Perl",
"bytes": "261"
},
{
"name": "Python",
"bytes": "53630"
},
{
"name": "R",
"bytes": "5191"
},
{
"name": "Shell",
"bytes": "210"
},
{
"name": "TypeScript",
"bytes": "32028"
}
],
"symlink_target": ""
} |
"""OS related QA tests.
"""
import os
import os.path
from ganeti import utils
from ganeti import constants
from ganeti import pathutils
from qa import qa_config
from qa import qa_utils
from qa import qa_error
from qa_utils import AssertCommand, AssertIn, AssertNotIn
_TEMP_OS_NAME = "TEMP-Ganeti-QA-OS"
_TEMP_OS_PATH = os.path.join(pathutils.OS_SEARCH_PATH[0], _TEMP_OS_NAME)
(_ALL_VALID,
_ALL_INVALID,
_PARTIALLY_VALID) = range(1, 4)
def TestOsList():
"""gnt-os list"""
AssertCommand(["gnt-os", "list"])
def TestOsDiagnose():
"""gnt-os diagnose"""
AssertCommand(["gnt-os", "diagnose"])
def _TestOsModify(hvp_dict, fail=False):
"""gnt-os modify"""
cmd = ["gnt-os", "modify"]
for hv_name, hv_params in hvp_dict.items():
cmd.append("-H")
options = []
for key, value in hv_params.items():
options.append("%s=%s" % (key, value))
cmd.append("%s:%s" % (hv_name, ",".join(options)))
cmd.append(_TEMP_OS_NAME)
AssertCommand(cmd, fail=fail)
def _TestOsStates(os_name):
"""gnt-os modify, more stuff"""
cmd = ["gnt-os", "modify"]
for param in ["hidden", "blacklisted"]:
for val in ["yes", "no"]:
new_cmd = cmd + ["--%s" % param, val, os_name]
AssertCommand(new_cmd)
# check that double-running the command is OK
AssertCommand(new_cmd)
def _SetupTempOs(node, dirname, variant, valid):
"""Creates a temporary OS definition on the given node.
"""
sq = utils.ShellQuoteArgs
parts = [
sq(["rm", "-rf", dirname]),
sq(["mkdir", "-p", dirname]),
sq(["cd", dirname]),
sq(["ln", "-fs", "/bin/true", "export"]),
sq(["ln", "-fs", "/bin/true", "import"]),
sq(["ln", "-fs", "/bin/true", "rename"]),
sq(["ln", "-fs", "/bin/true", "verify"]),
]
if valid:
parts.append(sq(["ln", "-fs", "/bin/true", "create"]))
parts.append(sq(["echo", str(constants.OS_API_V20)]) +
" >ganeti_api_version")
parts.append(sq(["echo", variant]) + " >variants.list")
parts.append(sq(["echo", "funny this is funny"]) + " >parameters.list")
cmd = " && ".join(parts)
print(qa_utils.FormatInfo("Setting up %s with %s OS definition" %
(node.primary,
["an invalid", "a valid"][int(valid)])))
AssertCommand(cmd, node=node)
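# For orientation (comment added for illustration, not in the original code):
# a "valid" temporary OS definition created above ends up as
#
#   <dirname>/
#     export, import, rename, verify  -> symlinks to /bin/true
#     create                          -> symlink to /bin/true
#     ganeti_api_version              -> constants.OS_API_V20
#     variants.list                   -> the single variant name
#     parameters.list                 -> one dummy parameter line
#
# while the "invalid" flavour only gets the first four symlinks, which is
# what makes ``gnt-os diagnose`` flag it in _TestOs below.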
def _RemoveTempOs(node, dirname):
"""Removes a temporary OS definition.
"""
AssertCommand(["rm", "-rf", dirname], node=node)
def _TestOs(mode, rapi_cb):
"""Generic function for OS definition testing
"""
master = qa_config.GetMasterNode()
name = _TEMP_OS_NAME
variant = "default"
fullname = "%s+%s" % (name, variant)
dirname = _TEMP_OS_PATH
# Ensure OS is usable
cmd = ["gnt-os", "modify", "--hidden=no", "--blacklisted=no", name]
AssertCommand(cmd)
nodes = []
try:
for i, node in enumerate(qa_config.get("nodes")):
nodes.append(node)
if mode == _ALL_INVALID:
valid = False
elif mode == _ALL_VALID:
valid = True
elif mode == _PARTIALLY_VALID:
valid = bool(i % 2)
else:
raise AssertionError("Unknown mode %s" % mode)
_SetupTempOs(node, dirname, variant, valid)
# TODO: Use Python 2.6's itertools.permutations
for (hidden, blacklisted) in [(False, False), (True, False),
(False, True), (True, True)]:
# Change OS' visibility
cmd = ["gnt-os", "modify", "--hidden", ["no", "yes"][int(hidden)],
"--blacklisted", ["no", "yes"][int(blacklisted)], name]
AssertCommand(cmd)
# Diagnose, checking exit status
AssertCommand(["gnt-os", "diagnose"], fail=(mode != _ALL_VALID))
# Diagnose again, ignoring exit status
output = qa_utils.GetCommandOutput(master.primary,
"gnt-os diagnose || :")
for line in output.splitlines():
if line.startswith("OS: %s [global status:" % name):
break
else:
raise qa_error.Error("Didn't find OS '%s' in 'gnt-os diagnose'" % name)
# Check info for all
cmd = ["gnt-os", "info"]
output = qa_utils.GetCommandOutput(master.primary,
utils.ShellQuoteArgs(cmd))
AssertIn("%s:" % name, output.splitlines())
# Check info for OS
cmd = ["gnt-os", "info", name]
output = qa_utils.GetCommandOutput(master.primary,
utils.ShellQuoteArgs(cmd)).splitlines()
AssertIn("%s:" % name, output)
for (field, value) in [("valid", mode == _ALL_VALID),
("hidden", hidden),
("blacklisted", blacklisted)]:
AssertIn(" - %s: %s" % (field, value), output)
# Only valid OSes should be listed
cmd = ["gnt-os", "list", "--no-headers"]
output = qa_utils.GetCommandOutput(master.primary,
utils.ShellQuoteArgs(cmd))
if mode == _ALL_VALID and not (hidden or blacklisted):
assert_fn = AssertIn
else:
assert_fn = AssertNotIn
assert_fn(fullname, output.splitlines())
# Check via RAPI
if rapi_cb:
assert_fn(fullname, rapi_cb())
finally:
for node in nodes:
_RemoveTempOs(node, dirname)
def TestOsValid(rapi_cb):
"""Testing valid OS definition"""
return _TestOs(_ALL_VALID, rapi_cb)
def TestOsInvalid(rapi_cb):
"""Testing invalid OS definition"""
return _TestOs(_ALL_INVALID, rapi_cb)
def TestOsPartiallyValid(rapi_cb):
"""Testing partially valid OS definition"""
return _TestOs(_PARTIALLY_VALID, rapi_cb)
def TestOsModifyValid():
"""Testing a valid os modify invocation"""
hv_dict = {
constants.HT_XEN_PVM: {
constants.HV_ROOT_PATH: "/dev/sda5",
},
constants.HT_XEN_HVM: {
constants.HV_ACPI: False,
constants.HV_PAE: True,
},
}
return _TestOsModify(hv_dict)
def TestOsModifyInvalid():
"""Testing an invalid os modify invocation"""
hv_dict = {
"blahblahblubb": {"bar": ""},
}
return _TestOsModify(hv_dict, fail=True)
def TestOsStatesNonExisting():
"""Testing OS states with non-existing OS"""
AssertCommand(["test", "-e", _TEMP_OS_PATH], fail=True)
return _TestOsStates(_TEMP_OS_NAME)
| {
"content_hash": "d228ee5694f19b83e76c64b16dbd7a23",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 80,
"avg_line_length": 27.388646288209607,
"alnum_prop": 0.5808354591836735,
"repo_name": "ganeti/ganeti",
"id": "32c385897afa35bddb2734051525825f29a4512b",
"size": "7638",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qa/qa_os.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Haskell",
"bytes": "2518005"
},
{
"name": "JavaScript",
"bytes": "8847"
},
{
"name": "M4",
"bytes": "32518"
},
{
"name": "Makefile",
"bytes": "96845"
},
{
"name": "Python",
"bytes": "6254835"
},
{
"name": "Shell",
"bytes": "153137"
}
],
"symlink_target": ""
} |
from flask_wtf import FlaskForm
from wtforms import validators, StringField, SubmitField
class SdrForm(FlaskForm):
command = StringField('Command', [
validators.DataRequired(),
],default='rtl_sdr -f 100122000 -g 40 -s 2500000 -n 25000000 sdr-capture-$(date +%Y-%m-%d:%H:%M:%S).dat')
submit = SubmitField('Run')
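# Illustrative view sketch, not part of the original blueprint.  The
# blueprint, route and template names below are assumptions; only the
# SdrForm API above (the ``command`` field plus Flask-WTF's
# ``validate_on_submit``) comes from this file.
#
#   from flask import Blueprint, render_template
#   from .forms.sdrform import SdrForm
#
#   rtlsdr = Blueprint('rtlsdr', __name__)
#
#   @rtlsdr.route('/sdr', methods=['GET', 'POST'])
#   def sdr():
#       form = SdrForm()
#       if form.validate_on_submit():
#           command = form.command.data  # rtl_sdr capture command to execute
#           # hand `command` off to whatever runs it on the device
#       return render_template('rtlsdr/sdr.html', form=form)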
| {
"content_hash": "ad315d9d6a61c5774cfda0b533a7ce33",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 109,
"avg_line_length": 41.5,
"alnum_prop": 0.6927710843373494,
"repo_name": "t4skforce/PenTestingUnit",
"id": "49be1a983103971aa8fa4daa19a914408d0352c8",
"size": "332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/app/blueprints/rtlsdr/forms/sdrform.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7238"
},
{
"name": "HTML",
"bytes": "212601"
},
{
"name": "JavaScript",
"bytes": "2830190"
},
{
"name": "Python",
"bytes": "38451"
},
{
"name": "Shell",
"bytes": "6381"
}
],
"symlink_target": ""
} |
import unittest
from prf.tests.prf_testcase import PrfTestCase
from prf.mongodb import get_document_cls
class TestMongoDB(PrfTestCase):
def setUp(self):
super(TestMongoDB, self).setUp()
self.drop_databases()
| {
"content_hash": "c209214fc2a77dd8b6a2e4cc04841393",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 46,
"avg_line_length": 21.09090909090909,
"alnum_prop": 0.728448275862069,
"repo_name": "vahana/prf",
"id": "7b61eb32add1f013ebcab84c3f35d5abcb413551",
"size": "232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prf/tests/test_mongodb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "167421"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.views.generic import RedirectView
from . import views
from .models import Article, DateArticle
date_based_info_dict = {
'queryset': Article.objects.all(),
'date_field': 'date_created',
'month_format': '%m',
}
object_list_dict = {
'queryset': Article.objects.all(),
'paginate_by': 2,
}
object_list_no_paginate_by = {
'queryset': Article.objects.all(),
}
numeric_days_info_dict = dict(date_based_info_dict, day_format='%d')
date_based_datefield_info_dict = dict(date_based_info_dict, queryset=DateArticle.objects.all())
urlpatterns = [
url(r'^accounts/login/$', auth_views.login, {'template_name': 'login.html'}),
url(r'^accounts/logout/$', auth_views.logout),
# Special URLs for particular regression cases.
url('^中文/target/$', views.index_page),
]
# redirects, both temporary and permanent, with non-ASCII targets
urlpatterns += [
url('^nonascii_redirect/$', RedirectView.as_view(
url='/中文/target/', permanent=False)),
url('^permanent_nonascii_redirect/$', RedirectView.as_view(
url='/中文/target/', permanent=True)),
]
# json response
urlpatterns += [
url(r'^json/response/$', views.json_response_view),
]
| {
"content_hash": "a705a60df49437a930b30740aa02b54a",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 95,
"avg_line_length": 28.520833333333332,
"alnum_prop": 0.6596055514974434,
"repo_name": "yephper/django",
"id": "31e5e052d3567fb07249a0ff831efa94acba99c0",
"size": "1405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/view_tests/generic_urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
} |
from flask import current_app as app
@app.route('/')
def index():
return 'Hello from %s' % app.config.SERVICE_NAME
| {
"content_hash": "122ac7e985575454ebb631a6d042ef72",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 49,
"avg_line_length": 19.666666666666668,
"alnum_prop": 0.6949152542372882,
"repo_name": "hamaxx/mlask",
"id": "8b9201916eb5fdda6ab1be145abc389efeea55d6",
"size": "143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13085"
}
],
"symlink_target": ""
} |
import click
import re
import sys
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
from datetime import datetime
# Usage something like:
# merge_callers.py -t TUMOR -n NORMAL -m1 MuTect1.vcf -m2 MuTect2.vcf -s strelka.vcf -i human_g1k_v37_decoy.fasta.fai
@click.command(context_settings = dict( help_option_names = ['-h', '--help'] ))
@click.option('--tumorID', '-t', type=str, help='Tumor sample ID', required=True)
@click.option('--normalID', '-n', type=str, help='Normal sample ID', required=True)
@click.option('--mutect1VCF', '-m1', type=str, help='MuTect1 VCF file', required=True)
@click.option('--mutect2VCF', '-m2', type=str, help='MuTect2 VCF file', required=True)
@click.option('--strelkaVCF', '-s', type=str, help='Strelka VCF file', required=True)
@click.option('--genomeIndex', '-i', type=str, help='Index of the used genome (generated by samtools faidx)', required=True)
def mergeVCFs(tumorid, normalid, mutect1vcf, mutect2vcf, strelkavcf, genomeindex):
# this is the main processing routine
mutect2=parse_mutect2(mutect2vcf)
mutect1=parse_mutect1(mutect1vcf,tumorid,normalid)
strelka=parse_strelka_snvs(strelkavcf)
generate_output(mutect1, mutect2, strelka, tumorid, normalid, genomeindex)
plot_allele_freqs(mutect1, mutect2, strelka, tumorid)
def plot_allele_freqs(mutect1, mutect2, strelka, tumorid):
#columns = ['MuTect1','MuTect2', 'Strelka', 'M1M2I_M1','M1M2I_M2' 'M1SI_M1', 'M1SI_S','M2SI_M2', 'M2SI_S','M1M2SI_M1','M1M2SI_M2','M1M2SI_S' ]
#columns = ['MuTect1_singletons','MuTect2_singletons', 'Strelka_singletons', 'M1M2I', 'M1SI', 'M2SI','M1M2SI']
columns = ['MuTect1_singletons','MuTect2_singletons','Strelka_singletons','MuTect1_all','MuTect2_all','Strelka_all','MuTect1_MuTect2','MuTect1_Strelka','MuTect2_Strelka','MuTect1_MuTect2_Strelka']
count = np.zeros((10), dtype=np.int)
#allele_freq=np.empty(12)
allele_freq=np.empty(10)
#allele_freq[:] = numpy.NAN
all_snvs=set(mutect1['snvs'].keys()+mutect2['snvs'].keys()+strelka['snvs'].keys())
antal=0
for pos in all_snvs:
#this_variant=np.empty(12)
this_variant=np.empty(10)
this_variant[:]=-999
vcfinfo = {}
#Which caller(s) detected the variant?
if pos in mutect1['snvs']:
vcfinfo['mutect1']=mutect1['snvs'][pos]['ad']['tumor']
if pos in mutect2['snvs']:
vcfinfo['mutect2']=mutect2['snvs'][pos]['ad']['tumor']
if pos in strelka['snvs']:
vcfinfo['strelka']=strelka['snvs'][pos]['ad']['tumor']
#Singletons
if 'mutect1' in vcfinfo.keys() and 'mutect2' not in vcfinfo.keys() and 'strelka' not in vcfinfo.keys():
this_variant[0]=float(vcfinfo['mutect1'].split(",")[1])/(float(vcfinfo['mutect1'].split(",")[0])+float(vcfinfo['mutect1'].split(",")[1]))
count[0]=count[0]+1
if 'mutect1' not in vcfinfo.keys() and 'mutect2' in vcfinfo.keys() and 'strelka' not in vcfinfo.keys():
this_variant[1]=float(vcfinfo['mutect2'].split(",")[1])/(float(vcfinfo['mutect2'].split(",")[0])+float(vcfinfo['mutect2'].split(",")[1]))
count[1]=count[1]+1
if this_variant[1]>1:
print this_variant[1]
print mutect2['snvs'][pos]['ad']['tumor']
if 'mutect1' not in vcfinfo.keys() and 'mutect2' not in vcfinfo.keys() and 'strelka' in vcfinfo.keys():
this_variant[2]=float(vcfinfo['strelka'].split(",")[1])/(float(vcfinfo['strelka'].split(",")[0])+float(vcfinfo['strelka'].split(",")[1]))
count[2]=count[2]+1
#All calles by callers
if 'mutect1' in vcfinfo.keys():
this_variant[3]=float(vcfinfo['mutect1'].split(",")[1])/(float(vcfinfo['mutect1'].split(",")[0])+float(vcfinfo['mutect1'].split(",")[1]))
count[3]=count[3]+1
if 'mutect2' in vcfinfo.keys():
this_variant[4]=float(vcfinfo['mutect2'].split(",")[1])/(float(vcfinfo['mutect2'].split(",")[0])+float(vcfinfo['mutect2'].split(",")[1]))
count[4]=count[4]+1
if 'strelka' in vcfinfo.keys():
this_variant[5]=float(vcfinfo['strelka'].split(",")[1])/(float(vcfinfo['strelka'].split(",")[0])+float(vcfinfo['strelka'].split(",")[1]))
count[5]=count[5]+1
#Intersection of two callers - allele frequencies calculated as mean of reported for callers
if 'mutect1' in vcfinfo.keys() and 'mutect2' in vcfinfo.keys():
#this_variant[3]=float(vcfinfo['mutect1'].split(",")[1])/(float(vcfinfo['mutect1'].split(",")[0])+float(vcfinfo['mutect1'].split(",")[1]))
#this_variant[4]=float(vcfinfo['mutect2'].split(",")[1])/(float(vcfinfo['mutect2'].split(",")[0])+float(vcfinfo['mutect2'].split(",")[1]))
this_variant[6]=(float(vcfinfo['mutect1'].split(",")[1])/(float(vcfinfo['mutect1'].split(",")[0])+float(vcfinfo['mutect1'].split(",")[1])) + float(vcfinfo['mutect2'].split(",")[1])/(float(vcfinfo['mutect2'].split(",")[0])+float(vcfinfo['mutect2'].split(",")[1])))/2
count[6]=count[6]+1
if 'mutect1' in vcfinfo.keys() and 'strelka' in vcfinfo.keys():
#this_variant[5]=float(vcfinfo['mutect1'].split(",")[1])/(float(vcfinfo['mutect1'].split(",")[0])+float(vcfinfo['mutect1'].split(",")[1]))
#this_variant[6]=float(vcfinfo['strelka'].split(",")[1])/(float(vcfinfo['strelka'].split(",")[0])+float(vcfinfo['strelka'].split(",")[1]))
this_variant[7]=(float(vcfinfo['mutect1'].split(",")[1])/(float(vcfinfo['mutect1'].split(",")[0])+float(vcfinfo['mutect1'].split(",")[1])) + float(vcfinfo['strelka'].split(",")[1])/(float(vcfinfo['strelka'].split(",")[0])+float(vcfinfo['strelka'].split(",")[1])))/2
count[7]=count[7]+1
if 'mutect2' in vcfinfo.keys() and 'strelka' in vcfinfo.keys():
#this_variant[7]=float(vcfinfo['mutect2'].split(",")[1])/(float(vcfinfo['mutect2'].split(",")[0])+float(vcfinfo['mutect2'].split(",")[1]))
#this_variant[8]=float(vcfinfo['strelka'].split(",")[1])/(float(vcfinfo['strelka'].split(",")[0])+float(vcfinfo['strelka'].split(",")[1]))
this_variant[8]=(float(vcfinfo['mutect2'].split(",")[1])/(float(vcfinfo['mutect2'].split(",")[0])+float(vcfinfo['mutect2'].split(",")[1]))+float(vcfinfo['strelka'].split(",")[1])/(float(vcfinfo['strelka'].split(",")[0])+float(vcfinfo['strelka'].split(",")[1])))/2
count[8]=count[8]+1
#Intersection of three callers - allele frequencies calculated as mean of reported for callers
if 'mutect1' in vcfinfo.keys() and 'mutect2' in vcfinfo.keys() and 'strelka' in vcfinfo.keys():
#this_variant[9]=float(vcfinfo['mutect1'].split(",")[1])/(float(vcfinfo['mutect1'].split(",")[0])+float(vcfinfo['mutect1'].split(",")[1]))
#this_variant[10]=float(vcfinfo['mutect2'].split(",")[1])/(float(vcfinfo['mutect2'].split(",")[0])+float(vcfinfo['mutect2'].split(",")[1]))
#this_variant[11]=float(vcfinfo['strelka'].split(",")[1])/(float(vcfinfo['strelka'].split(",")[0])+float(vcfinfo['strelka'].split(",")[1]))
this_variant[9]=(float(vcfinfo['mutect1'].split(",")[1])/(float(vcfinfo['mutect1'].split(",")[0])+float(vcfinfo['mutect1'].split(",")[1])) + float(vcfinfo['mutect2'].split(",")[1])/(float(vcfinfo['mutect2'].split(",")[0])+float(vcfinfo['mutect2'].split(",")[1])) + float(vcfinfo['strelka'].split(",")[1])/(float(vcfinfo['strelka'].split(",")[0])+float(vcfinfo['strelka'].split(",")[1])))/3
count[9]=count[9]+1
allele_freq=np.vstack((allele_freq, this_variant))
    #Mask the -999 placeholder entries (call sets a variant does not belong to) in allele_freq
masked_allele_freq=np.ma.masked_equal(allele_freq,-999)
allele_freqs_nonempty = [[y for y in row if y] for row in masked_allele_freq.T]
#Create plots and print to PDF file
numBoxes=10
pp = PdfPages(tumorid+'_allele_freqs.pdf')
fig, ax1 = plt.subplots(figsize=(10, 6))
plt.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25)
x=range(1, len(columns)+1)
bp = plt.boxplot(allele_freqs_nonempty, notch=0, sym='+', vert=1, whis=1.5)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
# Hide these grid behind plot objects
ax1.set_axisbelow(True)
ax1.set_title('SNVs called in '+tumorid+'\n')
ax1.set_xlabel('Call set')
ax1.set_ylabel('Alternative allele frequency')
# Set the axes ranges and axes labels
ax1.set_xlim(0.5, numBoxes + 0.5)
top = 1.2
bottom = 0
ax1.set_ylim(bottom, top)
xtickNames = plt.setp(ax1, xticklabels=columns)
plt.setp(xtickNames, rotation=45, fontsize=8)
#Print counts and medians above the boxes
for tick, label in zip(x, count):
ax1.text(tick, 1.1, 'n = '+str(label),horizontalalignment='center', size='x-small')
median_values=[]
for medline in bp['medians']:
median_values.append(str(round(medline.get_ydata()[0],2)))
for tick, label in zip(x, median_values):
ax1.text(tick, 1, 'm = '+str(label),horizontalalignment='center', size='x-small')
plt.savefig(pp, format='pdf')
pp.close()
print 'printed results to '+tumorid+'_allele_freqs.pdf'
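def _alt_allele_fraction(ad):
    """Illustrative helper, not called by the original code: it spells out the
    expression repeated throughout plot_allele_freqs, i.e. the alternative
    allele fraction alt/(ref+alt) taken from a 'ref,alt' allelic depth string."""
    ref_depth, alt_depth = [float(x) for x in ad.split(",")]
    return alt_depth / (ref_depth + alt_depth)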
def generate_output(mutect1, mutect2, strelka, tumorid, normalid, genomeIndex):
snv_file=tumorid+'.snvs.vcf'
avinput=tumorid+'.avinput'
sf = open(snv_file, 'w')
ai = open(avinput, 'w')
sf.write("%s\n" %("##fileformat=VCFv4.2"))
sf.write("%s%s\n" %("##date=",str(datetime.now())))
sf.write("%s%s\n" %("##source=",sys.argv[0]))
sf.write("%s\n" %("##FILTER=<ID=CONCORDANT,Description=\"Called by all three callers (MuTect1, MuTect2 and Strelka)\""))
sf.write("%s\n" %("##FILTER=<ID=DISCORDANT,Description=\"NOT called by all three callers\""))
sf.write("%s\n" %("##INFO=<ID=M1,Number=.,Type=String,Description=\"Called by MuTect1\""))
sf.write("%s\n" %("##INFO=<ID=M2,Number=.,Type=String,Description=\"Called by MuTect2\""))
sf.write("%s\n" %("##INFO=<ID=S,Number=.,Type=String,Description=\"Called by Strelka\""))
sf.write("%s\n" %("##FORMAT=<ID=ADM1,Number=.,Type=Integer,Description=\"Allelic depths reported by MuTect1 for the ref and alt alleles in the order listed\""))
sf.write("%s\n" %("##FORMAT=<ID=ADM2,Number=.,Type=Integer,Description=\"Allelic depths reported by MuTect2 for the ref and alt alleles in the order listed\""))
sf.write("%s\n" %("##FORMAT=<ID=ADS,Number=.,Type=Integer,Description=\"Allelic depths reported by Strelka for the ref and alt alleles in the order listed\""))
sf.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" %('#CHROM', 'POS','ID', 'REF', 'ALT','QUAL', 'FILTER', 'INFO','FORMAT', tumorid, normalid))
#All mutated snvs:
all_snvs=set(mutect1['snvs'].keys()+mutect2['snvs'].keys()+strelka['snvs'].keys())
antal=0
sorted_pos=sort_positions(all_snvs, genomeIndex)
for pos in sorted_pos:
#for pos in all_snvs:
vcfinfo = {}
#Which caller(s) detected the variant?
if pos in mutect1['snvs']:
vcfinfo['mutect1']=mutect1['snvs'][pos]['info']
if pos in mutect2['snvs']:
vcfinfo['mutect2']=mutect2['snvs'][pos]['info']
if pos in strelka['snvs']:
vcfinfo['strelka']=strelka['snvs'][pos]['info']
called_by=vcfinfo.keys()
#Do we have the same basic info from all callers? Should be...
if all(value == vcfinfo[called_by[0]] for value in vcfinfo.values()):
format=''
gf_tumor=''
gf_normal=''
callers=''
for c in called_by:
if c=='mutect1':
callers=callers+'M1;'
format=format+'ADM1:'
gf_tumor=gf_tumor+mutect1['snvs'][pos]['ad']['tumor']+':'
gf_normal=gf_normal+mutect1['snvs'][pos]['ad']['normal']+':'
elif c=='mutect2':
callers=callers+'M2;'
format=format+'ADM2:'
gf_tumor=gf_tumor+mutect2['snvs'][pos]['ad']['tumor']+':'
gf_normal=gf_normal+mutect2['snvs'][pos]['ad']['normal']+':'
elif c=='strelka':
callers=callers+'S;'
format=format+'ADS:'
gf_tumor=gf_tumor+strelka['snvs'][pos]['ad']['tumor']+':'
gf_normal=gf_normal+strelka['snvs'][pos]['ad']['normal']+':'
callers = callers[:-1]
format = format[:-1]
gf_tumor = gf_tumor[:-1]
gf_normal = gf_normal[:-1]
antal = antal+1
filter="DISCORDANT"
if len(called_by)==3:
filter="CONCORDANT"
vcfinfolist=vcfinfo[called_by[0]].split('\t')
baseinfo=vcfinfolist[0]+'\t'+vcfinfolist[1]+'\tNA\t'+vcfinfolist[2]+'\t'+vcfinfolist[3]+'\t'+'.'
sf.write("%s\t%s\t%s\t%s\t%s\t%s\n" %(baseinfo,filter, callers, format, gf_tumor, gf_normal))
ai.write("%s\n" %(vcfinfo[called_by[0]]))
else:
print "Conflict in ref and alt alleles between callers "+called_by+" at pos "+pos
def sort_positions(positions, genomeIndex):
CHROMOSOMES = []
selected = []
sorted = []
for line in open(genomeIndex, 'r'):
line = line.strip()
info = line.split("\t")
CHROMOSOMES.append(info[0])
selected.append([])
for pos in positions:
chr_pos=pos.split("_")
if chr_pos[0] in CHROMOSOMES:
selected[CHROMOSOMES.index(chr_pos[0])].append(int(chr_pos[1]))
for chr in CHROMOSOMES:
selected[CHROMOSOMES.index(chr)].sort()
for pos in selected[CHROMOSOMES.index(chr)]:
sorted.append(chr+'_'+str(pos))
return sorted
def parse_mutect2(vcf):
snvs = {}
indels = {}
datacolumn = {}
for line in open(vcf, 'r'):
line=line.strip()
# Extract column in vcf file for "TUMOR" and "NORMAL"
if line.startswith("#CHROM"):
info = line.split("\t")
for col in range(9, len(info)):
if info[col] in ['TUMOR', 'NORMAL']:
datacolumn[info[col]] = col
else:
print "ERROR: MuTect2 VCF file does not contain column for TUMOR or NORMAL"
break
if not line.startswith("#"):
filter1=re.compile('alt_allele_in_normal')
filter2=re.compile('clustered_events')
filter3=re.compile('germline_risk')
filter4=re.compile('homologous_mapping_event')
filter5=re.compile('multi_event_alt_allele_in_normal')
filter6=re.compile('panel_of_normals')
filter7=re.compile('str_contraction')
filter8=re.compile('t_lod_fstar')
filter9=re.compile('triallelic_site')
f1=filter1.search(line)
f2=filter2.search(line)
f3=filter3.search(line)
f4=filter4.search(line)
f5=filter5.search(line)
f6=filter6.search(line)
f7=filter7.search(line)
f8=filter8.search(line)
f9=filter9.search(line)
if not (f1 or f2 or f3 or f4 or f5 or f6 or f7 or f8 or f9):
info=line.split("\t")
pos=info[0]+'_'+info[1]
vcfinfo=info[0]+'\t'+info[1]+'\t'+info[3]+'\t'+info[4]
ad_tumor=info[datacolumn['TUMOR']].split(":")[1]
ad_normal=info[datacolumn['NORMAL']].split(":")[1]
ref=info[3]
alt=info[4]
alt_alleles = alt.split(",")
if len(alt_alleles) == 1:
#Indels
if len(ref)>1 or len(alt)>1:
indels[pos] = {}
indels[pos]['info']=vcfinfo
indels[pos]['ad'] = {}
indels[pos]['ad']['tumor']=ad_tumor
indels[pos]['ad']['normal']=ad_normal
#snvs
else:
snvs[pos] = {}
snvs[pos]['info']=vcfinfo
snvs[pos]['ad'] = {}
snvs[pos]['ad']['tumor']=ad_tumor
snvs[pos]['ad']['normal']=ad_normal
else:
print "WARNING: MuTect2 variant with multiple alternative alleles detected; skipped and not used in merged callset:"
print line
return {'indels':indels,'snvs':snvs}
def parse_mutect1(vcf, tumorid, normalid):
snvs = {}
datacolumn = {}
for line in open(vcf, 'r'):
line=line.strip()
# Extract column in vcf file for each sample
if line.startswith("#CHROM"):
info = line.split("\t")
for col in range(9, len(info)):
if info[col] in [tumorid, normalid]:
datacolumn[info[col]]=col
else:
print "ERROR: sample ids other than "+tumorid+" or "+normalid+" detected in MuTect1 vcf"
break
if not line.startswith("#"):
filter1=re.compile('REJECT')
f1=filter1.search(line)
if not (f1):
info=line.split("\t")
pos = info[0] + '_' + info[1]
vcfinfo = info[0] + '\t' + info[1] + '\t' + info[3] + '\t' + info[4]
ad_tumor = info[datacolumn[tumorid]].split(":")[1]
ad_normal = info[datacolumn[normalid]].split(":")[1]
alt=info[4]
alt_alleles=alt.split(",")
if len(alt_alleles) == 1:
snvs[pos] = {}
snvs[pos]['info']=vcfinfo
snvs[pos]['ad'] = {}
snvs[pos]['ad']['tumor']=ad_tumor
snvs[pos]['ad']['normal']=ad_normal
else:
print "WARNING: MuTect1 variant with multiple alternative alleles detected; skipped and not used in merged callset."
print line
return {'snvs':snvs}
def parse_strelka_snvs(vcf):
snvs = {}
datacolumn = {}
for line in open(vcf, 'r'):
line=line.strip()
# Extract column in vcf file for "TUMOR" and "NORMAL"
if line.startswith("#CHROM"):
info = line.split("\t")
for col in range(9, len(info)):
if info[col] in ['TUMOR', 'NORMAL']:
datacolumn[info[col]] = col
else:
print "ERROR: Strelka VCF file does not contain column for TUMOR or NORMAL"
break
if not line.startswith("#"):
info=line.split("\t")
pos=info[0]+'_'+info[1]
ref=info[3]
alt=info[4]
ad_normal = {}
ad_tumor = {}
#Using tiers 2 data
ad_normal['A']=int(info[datacolumn['NORMAL']].split(":")[4].split(",")[1])
ad_normal['C']=int(info[datacolumn['NORMAL']].split(":")[5].split(",")[1])
ad_normal['G']=int(info[datacolumn['NORMAL']].split(":")[6].split(",")[1])
ad_normal['T']=int(info[datacolumn['NORMAL']].split(":")[7].split(",")[1])
ad_tumor['A'] = int(info[datacolumn['TUMOR']].split(":")[4].split(",")[1])
ad_tumor['C'] = int(info[datacolumn['TUMOR']].split(":")[5].split(",")[1])
ad_tumor['G'] = int(info[datacolumn['TUMOR']].split(":")[6].split(",")[1])
ad_tumor['T'] = int(info[datacolumn['TUMOR']].split(":")[7].split(",")[1])
snvs[pos] = {}
snvs[pos]['ad'] = {}
# If several alternative alleles are detected in the tumor, report the most highly abundant one and print a warning message.
alt_allele=''
alt_depth_tumor = 0
            alt_depth_normal = 0
alt_alleles=alt.split(",")
for allele in alt_alleles:
if ad_tumor[allele] > alt_depth_tumor:
alt_depth_tumor=ad_tumor[allele]
alt_depth_normal=ad_normal[allele]
alt_allele=allele
if len(alt) > 1:
print "WARNING: Strelka variant with multiple alternative alleles detected. Reporting the alternative allele with highest read count:"
print line
vcfinfo = info[0] + '\t' + info[1] + '\t' + info[3] + '\t' + alt_allele
snvs[pos]['info'] = vcfinfo
snvs[pos]['ad']['tumor']=str(ad_tumor[ref])+','+str(alt_depth_tumor)
snvs[pos]['ad']['normal']=str(ad_normal[ref])+','+str(alt_depth_normal)
return {'snvs':snvs}
if __name__ == "__main__":
mergeVCFs()
| {
"content_hash": "cf615fa16bda123a54ee8c9c34250fb4",
"timestamp": "",
"source": "github",
"line_count": 397,
"max_line_length": 401,
"avg_line_length": 52.76070528967254,
"alnum_prop": 0.5580063019192208,
"repo_name": "SciLifeLab/CAW",
"id": "ccef82985f689935d8d854efdecb9a170c14466c",
"size": "20970",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/merge_callers.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import unittest
from itertools import islice
from django.test.runner import DiscoverRunner
from refarm_test_utils.results import TimedResult
class RefarmTestRunner(DiscoverRunner):
"""Check every test is tagged and show slowest tests table."""
def __init__(self, *args, top_slow=0, check_tags=True, rerun_failed=0, **kwargs):
super().__init__(*args, **kwargs)
self.top_slow = top_slow
self.check_tags = check_tags
self.rerun_failed = rerun_failed
self.tests_to_rerun = []
@classmethod
def add_arguments(cls, parser):
super().add_arguments(parser)
parser.add_argument(
'--top-slow', nargs='?', const=10, type=int,
metavar='N', help='Shows top N of slowest tests.',
)
parser.add_argument(
'--disable-tags-check', action='store_false', dest='check_tags',
help='Disables checking that each test is tagged.'
)
parser.add_argument(
'--rerun-failed', nargs='?', const=1, type=int, default=0,
metavar='N', help='Runs failed tests while they occur. Gives N tries.',
)
def build_suite(self, *args, **kwargs):
suite = super().build_suite(*args, **kwargs)
if self.check_tags:
check_tagged_tests(suite)
return suite
def get_resultclass(self):
return TimedResult if self.top_slow else super().get_resultclass()
def run_suite(self, suite, **kwargs):
result = super().run_suite(suite, **kwargs)
if self.rerun_failed > 0 and self.suite_result(suite, result) > 0:
self.tests_to_rerun = (
[f[0].id() for f in result.failures]
+ [e[0].id() for e in result.errors]
)
else:
self.tests_to_rerun = []
if self.top_slow:
assert isinstance(result, TimedResult), result
timings = list(islice(
sorted(
result.test_timings,
key=lambda t: t[1], # elapsed time
reverse=True,
),
self.top_slow
))
print('\nTop slowest tests:')
for i, (name, elapsed) in enumerate(timings, 1):
print(f'{i}. {elapsed:.2f} {name.splitlines()[0]}')
return result
def run_tests(self, test_labels, extra_tests=None, **kwargs):
suite_result = super().run_tests(test_labels, extra_tests, **kwargs)
if self.tests_to_rerun:
self.rerun_failed -= 1
delimiter = '\n\t- '
print(f'\nRerun tries left: {self.rerun_failed}')
print(f'Rerun these failed tests:{delimiter}{delimiter.join(self.tests_to_rerun)}\n')
suite_result = self.run_tests(self.tests_to_rerun, **kwargs)
return suite_result
def check_tagged_tests(suite):
# get the tags processing from:
# django.test.runner.filter_tests_by_tags
# https://github.com/django/django/blob/master/django/test/runner.py#L717
suite_class = type(suite)
for test in suite:
if isinstance(test, suite_class):
# check a nested suite
check_tagged_tests(test)
elif not isinstance(test, unittest.loader._FailedTest):
# check a non failed test
test_tags = set(getattr(test, 'tags', set()))
test_fn_name = getattr(test, '_testMethodName', str(test))
test_fn = getattr(test, test_fn_name, test)
test_fn_tags = set(getattr(test_fn, 'tags', set()))
if not test_tags.union(test_fn_tags):
raise Exception(
f'{test_fn_name} is not tagged. You have to decorate it '
'with tag("slow") or tag("fast").'
)
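# Illustrative wiring sketch, not part of the original module; project and
# test names are placeholders, while the runner path and CLI flags come from
# this file.
#
# settings.py:
#     TEST_RUNNER = 'refarm_test_utils.runners.RefarmTestRunner'
#
# tests.py -- every test must carry tag('fast') or tag('slow'), otherwise
# check_tagged_tests raises:
#     from django.test import TestCase, tag
#
#     @tag('fast')
#     class SmokeTest(TestCase):
#         def test_ok(self):
#             self.assertTrue(True)
#
# command line:
#     ./manage.py test --top-slow 10 --rerun-failed 2
#     ./manage.py test --disable-tags-check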
| {
"content_hash": "69ad1e749defe543f5407af670be3381",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 97,
"avg_line_length": 36.59615384615385,
"alnum_prop": 0.5627955859169732,
"repo_name": "fidals/refarm-site",
"id": "af4c3aa28a41e2e19c1fbbb80f8791fcb54af1f4",
"size": "3806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "refarm_test_utils/runners.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "64305"
},
{
"name": "HTML",
"bytes": "28723"
},
{
"name": "JavaScript",
"bytes": "31422"
},
{
"name": "Python",
"bytes": "296885"
}
],
"symlink_target": ""
} |
"""
Test different error functions as isolated units.
"""
from unittest import mock
from espei.optimizers.opt_mcmc import EmceeOptimizer
from espei.utils import database_symbols_to_fit
import numpy as np
import pytest
import pickle
import scipy.stats
from tinydb import where
from pycalphad import Database, Model, variables as v
from espei.paramselect import generate_parameters
from espei.error_functions import *
from espei.error_functions.activity_error import ActivityResidual
from espei.error_functions.equilibrium_thermochemical_error import calc_prop_differences, EquilibriumPropertyResidual
from espei.error_functions.non_equilibrium_thermochemical_error import FixedConfigurationPropertyResidual
from espei.error_functions.zpf_error import calculate_zpf_driving_forces, ZPFResidual
from espei.error_functions.context import setup_context
from espei.utils import unpack_piecewise, ModelTestException
from .fixtures import datasets_db
from .testing_data import *
def test_activity_error(datasets_db):
"""Test that activity error returns a correct result"""
datasets_db.insert(CU_MG_EXP_ACTIVITY)
dbf = Database(CU_MG_TDB)
error = calculate_activity_error(dbf, ['CU','MG','VA'], list(dbf.phases.keys()), datasets_db, {}, {}, {})
assert np.isclose(error, -257.41020886970756, rtol=1e-6)
def test_activity_residual_function(datasets_db):
dbf = Database(CU_MG_TDB)
datasets_db.insert(CU_MG_EXP_ACTIVITY)
residual_func = ActivityResidual(dbf, datasets_db, phase_models=None, symbols_to_fit=[])
# Regression test "truth" values - got values by running
residuals, weights = residual_func.get_residuals(np.asarray([]))
assert len(residuals) == len(weights)
assert np.allclose(residuals, [6522.652187085958, -1890.1414208991046, -4793.211215856485, -3018.311675280318, -1062.6724585088668, -2224.814500229084, -2256.9820026771777, -1735.8692674535414, -805.219891012428, 0.0])
likelihood = residual_func.get_likelihood(np.asarray([]))
assert np.isclose(likelihood, -257.41020886970756, rtol=1e-6)
def test_subsystem_activity_probability(datasets_db):
"""Test binary Cr-Ni data produces the same probability regardless of whether the main system is a binary or ternary."""
datasets_db.insert(CR_NI_ACTIVITY)
dbf_bin = Database(CR_NI_TDB)
dbf_tern = Database(CR_FE_NI_TDB)
phases = list(dbf_tern.phases.keys())
# Truth
bin_prob = calculate_activity_error(dbf_bin, ['CR','NI','VA'], phases, datasets_db, {}, {}, {})
# Getting binary subsystem data explictly (from binary input)
prob = calculate_activity_error(dbf_tern, ['CR','NI','VA'], phases, datasets_db, {}, {}, {})
assert np.isclose(prob, bin_prob)
# Getting binary subsystem from ternary input
prob = calculate_activity_error(dbf_tern, ['CR', 'FE', 'NI', 'VA'], phases, datasets_db, {}, {}, {})
assert np.isclose(prob, bin_prob)
def test_get_thermochemical_data_filters_invalid_sublattice_configurations(datasets_db):
datasets_db.insert(CU_MG_HM_MIX_CUMG2_ANTISITE)
dbf = Database(CU_MG_TDB)
comps = ["CU", "MG", "VA"]
phases = ["CUMG2"]
thermochemical_data = get_thermochemical_data(dbf, comps, phases, datasets_db)
print('thermochemical data:', thermochemical_data)
assert thermochemical_data[0]["calculate_dict"]["values"].shape == (2,)
error = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
assert np.isclose(error, -14.28729)
def test_fixed_configuration_residual_function(datasets_db):
dbf = Database(CU_MG_TDB)
datasets_db.insert(CU_MG_HM_MIX_CUMG2_ANTISITE)
residual_func = FixedConfigurationPropertyResidual(dbf, datasets_db, phase_models=None, symbols_to_fit=[])
# Regression test "truth" values - got values by running
residuals, weights = residual_func.get_residuals(np.asarray([]))
assert len(residuals) == len(weights)
assert np.allclose(residuals, [-10.0, -100.0])
likelihood = residual_func.get_likelihood(np.asarray([]))
assert np.isclose(likelihood, -14.28729, rtol=1e-6)
def test_get_thermochemical_data_filters_configurations_when_all_configurations_are_invalid(datasets_db):
datasets_db.insert(CU_MG_HM_MIX_CUMG2_ALL_INVALID) # No valid configurations
dbf = Database(CU_MG_TDB)
comps = ["CU", "MG", "VA"]
phases = ["CUMG2"]
thermochemical_data = get_thermochemical_data(dbf, comps, phases, datasets_db)
print('thermochemical data:', thermochemical_data)
assert thermochemical_data[0]["calculate_dict"]["values"].shape == (0,)
error = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
assert np.isclose(error, 0)
def test_non_equilibrium_thermochemical_error_with_multiple_X_points(datasets_db):
"""Multiple composition datapoints in a dataset for a mixing phase should be successful."""
datasets_db.insert(CU_MG_CPM_MIX_X_HCP_A3)
dbf = Database(CU_MG_TDB)
phases = list(dbf.phases.keys())
comps = ['CU', 'MG', 'VA']
thermochemical_data = get_thermochemical_data(dbf, comps, phases, datasets_db)
error = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
assert np.isclose(error, -4061.119001241541, rtol=1e-6)
def test_non_equilibrium_thermochemical_error_with_multiple_T_points(datasets_db):
"""Multiple temperature datapoints in a dataset for a stoichiometric comnpound should be successful."""
datasets_db.insert(CU_MG_HM_MIX_T_CUMG2)
dbf = Database(CU_MG_TDB)
phases = list(dbf.phases.keys())
comps = ['CU', 'MG', 'VA']
thermochemical_data = get_thermochemical_data(dbf, comps, phases, datasets_db)
error = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
assert np.isclose(error,-14.287293263253728, rtol=1e-6)
def test_non_equilibrium_thermochemical_error_with_multiple_T_X_points(datasets_db):
"""Multiple temperature and composition datapoints in a dataset for a mixing phase should be successful."""
datasets_db.insert(CU_MG_SM_MIX_T_X_FCC_A1)
dbf = Database(CU_MG_TDB)
phases = list(dbf.phases.keys())
comps = ['CU', 'MG', 'VA']
thermochemical_data = get_thermochemical_data(dbf, comps, phases, datasets_db)
error = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
assert np.isclose(float(error), -3282497.2380024833, rtol=1e-6)
def test_non_equilibrium_thermochemical_error_for_mixing_entropy_error_is_excess_only(datasets_db):
"""Tests that error in mixing entropy data is excess only (the ideal part is removed)."""
# If this fails, make sure the ideal mixing contribution is removed.
phase_models = {
"components": ["AL", "B"],
"phases": {
"LIQUID" : {
"sublattice_model": [["AL", "B"]],
"sublattice_site_ratios": [1]
},
"FCC_A1" : {
"sublattice_model": [["AL", "B"]],
"sublattice_site_ratios": [1]
}
}
}
dataset_excess_mixing = {
"components": ["AL", "B"],
"phases": ["FCC_A1"],
"solver": {
"sublattice_site_ratios": [1],
"sublattice_occupancies": [[[0.5, 0.5]]],
"sublattice_configurations": [[["AL", "B"]]],
"mode": "manual"
},
"conditions": {
"P": 101325,
"T": 298.15
},
"output": "SM_MIX",
"values": [[[10]]],
"excluded_model_contributions": ["idmix"]
}
datasets_db.insert(dataset_excess_mixing)
dbf = generate_parameters(phase_models, datasets_db, 'SGTE91', 'linear')
assert dbf.elements == {'AL', 'B'}
assert set(dbf.phases.keys()) == {'LIQUID', 'FCC_A1'}
assert len(dbf._parameters.search(where('parameter_type') == 'L')) == 1
phases = list(dbf.phases.keys())
comps = list(dbf.elements)
# the error should be exactly 0 because we are only fitting to one point
# the dataset is excess only
zero_error_prob = scipy.stats.norm(loc=0, scale=0.2).logpdf(0.0) # SM weight = 0.2
# Explicitly pass parameters={} to not try fitting anything
thermochemical_data = get_thermochemical_data(dbf, comps, phases, datasets_db, symbols_to_fit=[])
error = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
assert np.isclose(error, zero_error_prob, atol=1e-6)
def test_non_equilibrium_thermochemical_error_for_of_enthalpy_mixing(datasets_db):
"""Tests that error in mixing enthalpy data is calculated correctly"""
phase_models = {
"components": ["AL", "B"],
"phases": {
"LIQUID" : {
"sublattice_model": [["AL", "B"]],
"sublattice_site_ratios": [1]
},
"FCC_A1" : {
"sublattice_model": [["AL", "B"]],
"sublattice_site_ratios": [1]
}
}
}
dataset_excess_mixing = {
"components": ["AL", "B"],
"phases": ["FCC_A1"],
"solver": {
"sublattice_site_ratios": [1],
"sublattice_occupancies": [[[0.5, 0.5]]],
"sublattice_configurations": [[["AL", "B"]]],
"mode": "manual"
},
"conditions": {
"P": 101325,
"T": 298.15
},
"output": "HM_MIX",
"values": [[[10000]]],
"excluded_model_contributions": ["idmix"]
}
datasets_db.insert(dataset_excess_mixing)
dbf = generate_parameters(phase_models, datasets_db, 'SGTE91', 'linear')
assert dbf.elements == {'AL', 'B'}
assert set(dbf.phases.keys()) == {'LIQUID', 'FCC_A1'}
assert len(dbf._parameters.search(where('parameter_type') == 'L')) == 1
phases = list(dbf.phases.keys())
comps = list(dbf.elements)
# the error should be exactly 0 because we are only fitting to one point
# the dataset is excess only
zero_error_prob = scipy.stats.norm(loc=0, scale=500.0).logpdf(0.0) # HM weight = 500
# Explicitly pass parameters={} to not try fitting anything
thermochemical_data = get_thermochemical_data(dbf, comps, phases, datasets_db, symbols_to_fit=[])
error = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
assert np.isclose(error, zero_error_prob, atol=1e-6)
def test_subsystem_non_equilibrium_thermochemcial_probability(datasets_db):
"""Test binary Cr-Ni data produces the same probability regardless of whether the main system is a binary or ternary."""
datasets_db.insert(CR_NI_LIQUID_DATA)
dbf_bin = Database(CR_NI_TDB)
dbf_tern = Database(CR_FE_NI_TDB)
phases = list(dbf_tern.phases.keys())
# Truth
thermochemical_data = get_thermochemical_data(dbf_bin, ['CR', 'NI', 'VA'], phases, datasets_db)
bin_prob = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
# Getting binary subsystem data explictly (from binary input)
thermochemical_data = get_thermochemical_data(dbf_tern, ['CR', 'NI', 'VA'], phases, datasets_db)
prob = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
assert np.isclose(prob, bin_prob)
# Getting binary subsystem from ternary input
thermochemical_data = get_thermochemical_data(dbf_tern, ['CR', 'FE', 'NI', 'VA'], phases, datasets_db)
prob = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
assert np.isclose(prob, bin_prob)
def test_zpf_error_zero(datasets_db):
"""Test that sum of square ZPF errors returns 0 for an exactly correct result"""
datasets_db.insert(CU_MG_DATASET_ZPF_ZERO_ERROR)
dbf = Database(CU_MG_TDB)
comps = ['CU','MG','VA']
phases = list(dbf.phases.keys())
# ZPF weight = 1 kJ and there are two points in the tieline
zero_error_prob = 2 * scipy.stats.norm(loc=0, scale=1000.0).logpdf(0.0)
zpf_data = get_zpf_data(dbf, comps, phases, datasets_db, {})
error = calculate_zpf_error(zpf_data, np.array([]))
assert np.isclose(error, zero_error_prob, rtol=1e-6)
def test_zpf_residual_function(datasets_db):
dbf = Database(CU_MG_TDB)
datasets_db.insert(CU_MG_DATASET_ZPF_ZERO_ERROR)
residual_func = ZPFResidual(dbf, datasets_db, phase_models=None, symbols_to_fit=[])
# Regression test "truth" values - got values by running
residuals, weights = residual_func.get_residuals(np.asarray([]))
assert len(residuals) == len(weights)
assert np.allclose(residuals, [0.0, 0.0], atol=1e-3) # looser tolerance due to numerical instabilities
likelihood = residual_func.get_likelihood(np.asarray([]))
# ZPF weight = 1 kJ and there are two points in the tieline
zero_error_prob = np.sum(scipy.stats.norm(loc=0, scale=1000.0).logpdf([0.0, 0.0]))
assert np.isclose(likelihood, zero_error_prob, rtol=1e-6)
def test_subsystem_zpf_probability(datasets_db):
"""Test binary Cr-Ni data produces the same probability regardless of whether the main system is a binary or ternary."""
datasets_db.insert(CR_NI_ZPF_DATA)
dbf_bin = Database(CR_NI_TDB)
dbf_tern = Database(CR_FE_NI_TDB)
phases = list(dbf_tern.phases.keys())
# Truth
zpf_data = get_zpf_data(dbf_bin, ['CR', 'NI', 'VA'], phases, datasets_db, {})
bin_prob = calculate_zpf_error(zpf_data, np.array([]))
# Getting binary subsystem data explictly (from binary input)
zpf_data = get_zpf_data(dbf_tern, ['CR', 'NI', 'VA'], phases, datasets_db, {})
prob = calculate_zpf_error(zpf_data, np.array([]))
assert np.isclose(prob, bin_prob)
# Getting binary subsystem from ternary input
zpf_data = get_zpf_data(dbf_tern, ['CR', 'FE', 'NI', 'VA'], phases, datasets_db, {})
prob = calculate_zpf_error(zpf_data, np.array([]))
assert np.isclose(prob, bin_prob)
def test_zpf_error_species(datasets_db):
"""Tests that ZPF error works if a species is used."""
# Note that the liquid is stabilized by the species for the equilibrium
# used in the data. If the SPECIES is removed from the database (and LIQUID
# constituents), then the resulting likelihood will NOT match this (and be
# closer to 93, according to a test.)
datasets_db.insert(LI_SN_ZPF_DATA)
dbf = Database(LI_SN_TDB)
comps = ['LI', 'SN']
phases = list(dbf.phases.keys())
# ZPF weight = 1 kJ and there are two points in the tieline
zero_error_probability = 2 * scipy.stats.norm(loc=0, scale=1000.0).logpdf(0.0)
zpf_data = get_zpf_data(dbf, comps, phases, datasets_db, {})
exact_likelihood = calculate_zpf_error(zpf_data, approximate_equilibrium=False)
assert np.isclose(exact_likelihood, zero_error_probability)
approx_likelihood = calculate_zpf_error(zpf_data, approximate_equilibrium=True)
# accept higher tolerance for approximate
assert np.isclose(approx_likelihood, zero_error_probability, rtol=1e-4)
def test_zpf_error_equilibrium_failure(datasets_db):
"""Test that a target hyperplane producing NaN chemical potentials gives a driving force of zero."""
datasets_db.insert(CU_MG_DATASET_ZPF_NAN_EQUILIBRIUM)
dbf = Database(CU_MG_TDB)
comps = ['CU','MG','VA']
phases = list(dbf.phases.keys())
# ZPF weight = 1 kJ and there are two points in the tieline
zero_error_probability = 2 * scipy.stats.norm(loc=0, scale=1000.0).logpdf(0.0)
zpf_data = get_zpf_data(dbf, comps, phases, datasets_db, {})
with mock.patch('espei.error_functions.zpf_error.estimate_hyperplane', return_value=np.array([np.nan, np.nan])):
exact_likelihood = calculate_zpf_error(zpf_data)
assert np.isclose(exact_likelihood, zero_error_probability, rtol=1e-6)
approx_likelihood = calculate_zpf_error(zpf_data)
assert np.isclose(approx_likelihood, zero_error_probability, rtol=1e-6)
def test_zpf_error_works_for_stoichiometric_cmpd_tielines(datasets_db):
"""A stochimetric compound with approximate composition can be in the datasets and work"""
datasets_db.insert(CU_MG_DATASET_ZPF_STOICH_COMPOUND)
dbf = Database(CU_MG_TDB)
comps = ['CU','MG']
phases = list(dbf.phases.keys())
# ZPF weight = 1 kJ and there are two points in the tieline
zero_error_probability = 2 * scipy.stats.norm(loc=0, scale=1000.0).logpdf(0.0)
zpf_data = get_zpf_data(dbf, comps, phases, datasets_db, {})
exact_likelihood = calculate_zpf_error(zpf_data)
assert np.isclose(exact_likelihood, zero_error_probability, rtol=1e-6)
approx_likelihood = calculate_zpf_error(zpf_data)
assert np.isclose(approx_likelihood, zero_error_probability, rtol=1e-6)
def test_non_equilibrium_thermochemcial_species(datasets_db):
"""Test species work for non-equilibrium thermochemical data."""
datasets_db.insert(LI_SN_LIQUID_DATA)
dbf = Database(LI_SN_TDB)
phases = ['LIQUID']
thermochemical_data = get_thermochemical_data(dbf, ['LI', 'SN'], phases, datasets_db)
prob = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
# Near zero error and non-zero error
assert np.isclose(prob, (-7.13354663 + -22.43585011))
def test_equilibrium_thermochemcial_error_species(datasets_db):
"""Test species work for equilibrium thermochemical data."""
datasets_db.insert(LI_SN_LIQUID_EQ_DATA)
dbf = Database(LI_SN_TDB)
phases = list(dbf.phases.keys())
eqdata = get_equilibrium_thermochemical_data(dbf, ['LI', 'SN'], phases, datasets_db)
# Thermo-Calc
truth_values = np.array([0.0, -28133.588, -40049.995, 0.0])
# Approximate
errors_approximate, weights = calc_prop_differences(eqdata[0], np.array([]), True)
# Looser tolerances because the equilibrium is approximate, note that this is pdens dependent
assert np.all(np.isclose(errors_approximate, truth_values, atol=1e-5, rtol=1e-3))
# Exact
errors_exact, weights = calc_prop_differences(eqdata[0], np.array([]), False)
assert np.all(np.isclose(errors_exact, truth_values, atol=1e-5))
def test_equilibrium_thermochemical_error_unsupported_property(datasets_db):
"""Test that an equilibrium property that is not explictly supported will work."""
# This test specifically tests Curie temperature
datasets_db.insert(CR_NI_LIQUID_EQ_TC_DATA)
EXPECTED_VALUES = np.array([374.6625, 0.0, 0.0]) # the TC should be 374.6625 in both cases, but "values" are [0 and 382.0214], so the differences should be flipped.
dbf = Database(CR_NI_TDB)
phases = list(dbf.phases.keys())
eqdata = get_equilibrium_thermochemical_data(dbf, ['CR', 'NI'], phases, datasets_db)
errors_exact, weights = calc_prop_differences(eqdata[0], np.array([]))
assert np.all(np.isclose(errors_exact, EXPECTED_VALUES, atol=1e-3))
def test_equilibrium_property_residual_function(datasets_db):
dbf = Database(CR_NI_TDB)
datasets_db.insert(CR_NI_LIQUID_EQ_TC_DATA)
residual_func = EquilibriumPropertyResidual(dbf, datasets_db, phase_models=None, symbols_to_fit=[])
residuals, weights = residual_func.get_residuals(np.asarray([]))
assert len(residuals) == len(weights)
assert np.allclose(residuals, [374.6625, 0.0, 0.0])
# Regression test "truth" values - got values by running
likelihood = residual_func.get_likelihood(np.asarray([]))
assert np.isclose(likelihood, -70188.75126872442, rtol=1e-6)
def test_equilibrium_thermochemical_error_computes_correct_probability(datasets_db):
"""Integration test for equilibrium thermochemical error."""
datasets_db.insert(CU_MG_EQ_HMR_LIQUID)
dbf = Database(CU_MG_TDB)
phases = list(dbf.phases.keys())
# Test that errors update in response to changing parameters
# no parameters
eqdata = get_equilibrium_thermochemical_data(dbf, ['CU', 'MG'], phases, datasets_db)
errors, weights = calc_prop_differences(eqdata[0], np.array([]))
expected_vals = [-31626.6*0.5*0.5]
assert np.all(np.isclose(errors, expected_vals))
# VV0017 (LIQUID, L0)
eqdata = get_equilibrium_thermochemical_data(dbf, ['CU', 'MG'], phases, datasets_db, parameters={'VV0017': -31626.6})
# unchanged, should be the same as before
errors, weights = calc_prop_differences(eqdata[0], np.array([-31626.6]))
assert np.all(np.isclose(errors, [-31626.6*0.5*0.5]))
# change to -40000
errors, weights = calc_prop_differences(eqdata[0], np.array([-40000], np.float_))
assert np.all(np.isclose(errors, [-40000*0.5*0.5]))
def test_driving_force_miscibility_gap(datasets_db):
datasets_db.insert(A_B_DATASET_ALPHA)
dbf = Database(A_B_REGULAR_SOLUTION_TDB)
parameters = {"L_ALPHA": None}
zpf_data = get_zpf_data(dbf, ["A", "B"], ["ALPHA"], datasets_db, parameters)
# probability for zero error error with ZPF weight = 1000.0
zero_error_prob = scipy.stats.norm(loc=0, scale=1000.0).logpdf(0.0)
# Ideal solution case
params = np.array([0.0])
prob = calculate_zpf_error(zpf_data, parameters=params, approximate_equilibrium=False)
assert np.isclose(prob, zero_error_prob)
prob = calculate_zpf_error(zpf_data, parameters=params, approximate_equilibrium=True)
assert np.isclose(prob, zero_error_prob)
# Negative interaction case
params = np.array([-10000.0])
prob = calculate_zpf_error(zpf_data, parameters=params, approximate_equilibrium=False)
assert np.isclose(prob, zero_error_prob)
prob = calculate_zpf_error(zpf_data, parameters=params, approximate_equilibrium=True)
assert np.isclose(prob, zero_error_prob)
# Miscibility gap case
params = np.array([10000.0])
prob = calculate_zpf_error(zpf_data, parameters=params, approximate_equilibrium=False)
# Remember these are log probabilities, so more negative means smaller probability and larger error
assert prob < zero_error_prob
prob = calculate_zpf_error(zpf_data, parameters=params, approximate_equilibrium=True)
assert prob < zero_error_prob
def test_setting_up_context_with_custom_models(datasets_db):
phase_models = {
"components": ["CU", "MG", "VA"],
"phases": {
"LIQUID" : {
"sublattice_model": [["CU", "MG"]],
"sublattice_site_ratios": [1],
"model": "espei.utils.ErrorModel"
},
"FCC_A1": {
"sublattice_model": [["CU", "MG"], ["VA"]],
"sublattice_site_ratios": [1, 1]
}
}
}
dbf = Database(CU_MG_TDB)
# Should work without error
ctx = setup_context(dbf, datasets_db, phase_models=phase_models)
# Once we have data, the ErrorModel should be built and raise
datasets_db.insert(CU_MG_DATASET_ZPF_ZERO_ERROR)
with pytest.raises(ModelTestException):
ctx = setup_context(dbf, datasets_db, phase_models=phase_models)
def test_zpf_context_is_pickleable(datasets_db):
"""Test that the context for ZPF data is pickleable"""
datasets_db.insert(CU_MG_DATASET_ZPF_ZERO_ERROR)
dbf = Database(CU_MG_TDB)
symbols_to_fit = database_symbols_to_fit(dbf)
initial_guess = np.array([unpack_piecewise(dbf.symbols[s]) for s in symbols_to_fit])
prior_dict = EmceeOptimizer.get_priors(None, symbols_to_fit, initial_guess)
ctx = setup_context(dbf, datasets_db)
ctx.update(prior_dict)
ctx_pickle = pickle.dumps(ctx)
ctx_unpickled = pickle.loads(ctx_pickle)
regular_predict = EmceeOptimizer.predict(initial_guess, **ctx)
unpickle_predict = EmceeOptimizer.predict(initial_guess, **ctx_unpickled)
assert np.isclose(regular_predict, unpickle_predict)
def test_activity_context_is_pickleable(datasets_db):
"""Test that the context for activity data is pickleable"""
datasets_db.insert(CU_MG_EXP_ACTIVITY)
dbf = Database(CU_MG_TDB)
symbols_to_fit = database_symbols_to_fit(dbf)
initial_guess = np.array([unpack_piecewise(dbf.symbols[s]) for s in symbols_to_fit])
prior_dict = EmceeOptimizer.get_priors(None, symbols_to_fit, initial_guess)
ctx = setup_context(dbf, datasets_db)
ctx.update(prior_dict)
ctx_pickle = pickle.dumps(ctx)
ctx_unpickled = pickle.loads(ctx_pickle)
regular_predict = EmceeOptimizer.predict(initial_guess, **ctx)
unpickle_predict = EmceeOptimizer.predict(initial_guess, **ctx_unpickled)
assert np.isclose(regular_predict, unpickle_predict)
def test_non_equilibrium_thermochemical_context_is_pickleable(datasets_db):
"""Test that the context for non-equilibrium thermochemical data is pickleable"""
datasets_db.insert(CU_MG_CPM_MIX_X_HCP_A3)
datasets_db.insert(CU_MG_SM_MIX_T_X_FCC_A1)
dbf = Database(CU_MG_TDB)
symbols_to_fit = database_symbols_to_fit(dbf)
initial_guess = np.array([unpack_piecewise(dbf.symbols[s]) for s in symbols_to_fit])
prior_dict = EmceeOptimizer.get_priors(None, symbols_to_fit, initial_guess)
ctx = setup_context(dbf, datasets_db)
ctx.update(prior_dict)
ctx_pickle = pickle.dumps(ctx)
ctx_unpickled = pickle.loads(ctx_pickle)
regular_predict = EmceeOptimizer.predict(initial_guess, **ctx)
unpickle_predict = EmceeOptimizer.predict(initial_guess, **ctx_unpickled)
assert np.isclose(regular_predict, unpickle_predict)
def test_equilibrium_thermochemical_context_is_pickleable(datasets_db):
"""Test that the context for equilibrium thermochemical data is pickleable"""
datasets_db.insert(CU_MG_EQ_HMR_LIQUID)
dbf = Database(CU_MG_TDB)
symbols_to_fit = database_symbols_to_fit(dbf)
initial_guess = np.array([unpack_piecewise(dbf.symbols[s]) for s in symbols_to_fit])
prior_dict = EmceeOptimizer.get_priors(None, symbols_to_fit, initial_guess)
ctx = setup_context(dbf, datasets_db)
ctx.update(prior_dict)
ctx_pickle = pickle.dumps(ctx)
ctx_unpickled = pickle.loads(ctx_pickle)
regular_predict = EmceeOptimizer.predict(initial_guess, **ctx)
unpickle_predict = EmceeOptimizer.predict(initial_guess, **ctx_unpickled)
assert np.isclose(regular_predict, unpickle_predict)
def test_zpf_error_for_prescribed_hyperplane_composition(datasets_db):
"""Test a dataset with __HYPERPLANE__ defined works"""
datasets_db.insert(A_B_DATASET_ALPHA)
dbf = Database(A_B_REGULAR_SOLUTION_TDB) # Ideal solution case by default
zpf_data = get_zpf_data(dbf, ["A", "B"], ["ALPHA"], datasets_db, {})
driving_forces, weights = calculate_zpf_driving_forces(zpf_data)
flat_driving_forces = np.asarray(driving_forces).flatten()
assert len(flat_driving_forces) == 1
assert np.isclose(flat_driving_forces[0], 0.0)
def test_zpf_error_hyperplane_with_null_phases(datasets_db):
"""Test typical use case of __HYPERPLANE__, where no phase compositions are defined."""
datasets_db.insert(CU_MG_DATASET_ZPF_HYPERPLANE_TWOPHASE)
dbf = Database(CU_MG_TDB) # Ideal solution case by default
zpf_data = get_zpf_data(dbf, ["CU", "MG"], list(dbf.phases.keys()), datasets_db, {})
driving_forces, weights = calculate_zpf_driving_forces(zpf_data)
flat_driving_forces = np.asarray(driving_forces).flatten()
assert len(flat_driving_forces) == 2 # One for each vertex, HCP_A3 and CUMG2
assert np.allclose(flat_driving_forces, [-18.05883506, -780.50836135])
| {
"content_hash": "36b536f29ea79f736eb2172f0441da8d",
"timestamp": "",
"source": "github",
"line_count": 637,
"max_line_length": 222,
"avg_line_length": 42.602825745682885,
"alnum_prop": 0.6829906404303928,
"repo_name": "PhasesResearchLab/ESPEI",
"id": "e04972c18f5ba42b6b83b7bfc17ccf934b7449b8",
"size": "27177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_error_functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "587917"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
from . import models
class RepositorySerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Repository
fields = ('id', 'name', 'state')
class PresentationSerializer(serializers.HyperlinkedModelSerializer):
fullname = serializers.ReadOnlyField()
url = serializers.ReadOnlyField()
class Meta:
model = models.Presentation
fields = ('id', 'fullname', 'url')
| {
"content_hash": "8d4cca72dd2c639d402e5a4dc76b7a7f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 69,
"avg_line_length": 23.65,
"alnum_prop": 0.7040169133192389,
"repo_name": "fladi/qraz",
"id": "aba410f454f28c9152547c47c21ac4a82534dc5b",
"size": "520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/qraz/frontend/serializers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "1219"
},
{
"name": "HTML",
"bytes": "10836"
},
{
"name": "JavaScript",
"bytes": "3103"
},
{
"name": "Python",
"bytes": "39682"
}
],
"symlink_target": ""
} |
"""
This script creates a pile of compile-fail tests check that all the
derives have spans that point to the fields, rather than the
#[derive(...)] line.
sample usage: src/etc/generate-deriving-span-tests.py
"""
import sys, os, datetime, stat
TEST_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../test/compile-fail'))
YEAR = datetime.datetime.now().year
TEMPLATE = """// Copyright {year} The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This file was auto-generated using 'src/etc/generate-deriving-span-tests.py'
extern crate rand;
{error_deriving}
struct Error;
{code}
fn main() {{}}
"""
ENUM_STRING = """
#[derive({traits})]
enum Enum {{
A(
Error {errors}
)
}}
"""
ENUM_STRUCT_VARIANT_STRING = """
#[derive({traits})]
enum Enum {{
A {{
x: Error {errors}
}}
}}
"""
STRUCT_STRING = """
#[derive({traits})]
struct Struct {{
x: Error {errors}
}}
"""
STRUCT_TUPLE_STRING = """
#[derive({traits})]
struct Struct(
Error {errors}
);
"""
ENUM_TUPLE, ENUM_STRUCT, STRUCT_FIELDS, STRUCT_TUPLE = range(4)
def create_test_case(type, trait, super_traits, error_count):
string = [ENUM_STRING, ENUM_STRUCT_VARIANT_STRING, STRUCT_STRING, STRUCT_TUPLE_STRING][type]
all_traits = ','.join([trait] + super_traits)
super_traits = ','.join(super_traits)
error_deriving = '#[derive(%s)]' % super_traits if super_traits else ''
errors = '\n'.join('//~%s ERROR' % ('^' * n) for n in range(error_count))
code = string.format(traits = all_traits, errors = errors)
return TEMPLATE.format(year = YEAR, error_deriving=error_deriving, code = code)
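# Illustrative note (not part of the original script): with error_count=2 the
# annotations rendered into the {errors} slot of the templates look like
#     Error //~ ERROR
#     //~^ ERROR
# i.e. one marker per expected error, with each extra '^' pointing one line
# further up, back at the span of the Error field.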
def write_file(name, string):
test_file = os.path.join(TEST_DIR, 'derives-span-%s.rs' % name)
# set write permission if file exists, so it can be changed
if os.path.exists(test_file):
os.chmod(test_file, stat.S_IWUSR)
with open(test_file, 'wt') as f:
f.write(string)
# mark file read-only
os.chmod(test_file, stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)
ENUM = 1
STRUCT = 2
ALL = STRUCT | ENUM
traits = {
'Zero': (STRUCT, [], 1),
'Default': (STRUCT, [], 1),
'FromPrimitive': (0, [], 0), # only works for C-like enums
'Decodable': (0, [], 0), # FIXME: quoting gives horrible spans
'Encodable': (0, [], 0), # FIXME: quoting gives horrible spans
}
for (trait, supers, errs) in [('Rand', [], 1),
('Clone', [], 1),
('PartialEq', [], 2),
('PartialOrd', ['PartialEq'], 8),
('Eq', ['PartialEq'], 1),
('Ord', ['Eq', 'PartialOrd', 'PartialEq'], 1),
('Show', [], 1),
('Hash', [], 1)]:
traits[trait] = (ALL, supers, errs)
for (trait, (types, super_traits, error_count)) in traits.items():
mk = lambda ty: create_test_case(ty, trait, super_traits, error_count)
if types & ENUM:
write_file(trait + '-enum', mk(ENUM_TUPLE))
write_file(trait + '-enum-struct-variant', mk(ENUM_STRUCT))
if types & STRUCT:
write_file(trait + '-struct', mk(STRUCT_FIELDS))
write_file(trait + '-tuple-struct', mk(STRUCT_TUPLE))
| {
"content_hash": "43e40ce33eb0b2c76d3bed5aa92ba9c7",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 96,
"avg_line_length": 29.80327868852459,
"alnum_prop": 0.5992849284928493,
"repo_name": "defuz/rust",
"id": "eeb1b89472b3d36c82c14413fc2aa7d3804d1c04",
"size": "4118",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/etc/generate-deriving-span-tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "4990"
},
{
"name": "Assembly",
"bytes": "20018"
},
{
"name": "Awk",
"bytes": "159"
},
{
"name": "C",
"bytes": "673946"
},
{
"name": "C++",
"bytes": "65815"
},
{
"name": "CSS",
"bytes": "22910"
},
{
"name": "Emacs Lisp",
"bytes": "43283"
},
{
"name": "JavaScript",
"bytes": "33343"
},
{
"name": "Makefile",
"bytes": "217120"
},
{
"name": "Pascal",
"bytes": "1654"
},
{
"name": "Puppet",
"bytes": "14712"
},
{
"name": "Python",
"bytes": "120322"
},
{
"name": "Rust",
"bytes": "17681629"
},
{
"name": "Shell",
"bytes": "283165"
},
{
"name": "TeX",
"bytes": "57"
},
{
"name": "VimL",
"bytes": "35165"
}
],
"symlink_target": ""
} |
from django.conf.urls import *
urlpatterns = patterns('actividades.fadcanic.views',
url(r'^$', 'filtro_programa', name='filtro_programa'),
) | {
"content_hash": "ea6f0491e54b4557ed0cfe08436655b3",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 62,
"avg_line_length": 30.6,
"alnum_prop": 0.673202614379085,
"repo_name": "shiminasai/plataforma_fadcanic",
"id": "116627394d6b2db70b043bee1b06269eaaa6c758",
"size": "153",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "actividades/fadcanic/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "266182"
},
{
"name": "HTML",
"bytes": "465685"
},
{
"name": "JavaScript",
"bytes": "1047064"
},
{
"name": "Python",
"bytes": "370885"
}
],
"symlink_target": ""
} |
import json
import sys
import os
import argparse
import string
import datetime
import time
from numerous import Numerous, numerousKey, \
NumerousError, NumerousAuthError, NumerousMetricConflictError
#
# usage
# nr [ -c credspec ] [ -t limit ] [-D] [-jknNqwy+/ABEIMPS] [ m1 [v1] ]...
# additional options:
# [ --ensurerate ]
# [ --statistics ]
# [ --requestlog ]
# [ -R ]
#
#
# nr [ -c credspec ] [-Dnq] -I --delete m1 interaction1 ...
# nr [ -c credspec ] [-Dnq] -E --delete m1 event1 ...
# nr [ -c credspec ] [-Dnq] -P --delete m1 ...
# nr [ -c credspec ] [-Dnq] -A --delete m1 userId1 ...
# nr [ -c credspec ] [-Dq] --killmetric m1 ...
# nr [ -c credspec ] [-Dqj][-U]
# nr [ -c credspec ] [-Dqj][-UPw] photo-file
# nr -V
# nr -RR
#
# Perform Numerous operations on metrics
#
# A Numerous API key ("credentials") is required; you either specify it
# via -c or via the environment variable NUMEROUSAPIKEY.
#
# Using -c:
#
# ANY OF THESE FORMS:
# -c @filename the creds come from the named file "filename"
# -c /filename ... we simply assume no NumerousAPI keys
# -c ./filename ... start with '@' '.' or '/'
#
# So you only need the '@' (which gets stripped) if you are specifying
# a relative path that does not start with . or /
# Note too that -c ~/.somefile works because the shell
# expands the tilde (so there is a leading '/' by the time seen here)
#
# The file should contain a JSON object with a key "NumerousAPIKey".
# The APIKey comes from your numerous app - inside the app see
# settings/developer info.
#
# The JSON object may (obviously) contain other things but only
# the NumerousAPIKey will be used in this script.
#
# It is permissible, but not really recommended, to simply have
# a "naked" APIKey by itself in place of the full JSON object.
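#
#   For example, a minimal credentials file would contain just this (the key
#   value shown is a made-up placeholder, not a real key):
#
#       { "NumerousAPIKey": "nmrs-0000-example" }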
#
# OTHER FORMS:
# -c @- the creds come from stdin (JSON or naked)
#
# -c anythingElse the creds are anythingElse (JSON or naked)
#
# Without -c the credentials come from environment variable:
#
# NUMEROUSAPIKEY (exact same format options as the -c credspec)
#
# None of this is terribly secure but it is what it is
#
# If option -k/--key is given, after all this happens the NumerousAPIKey
# will be printed. This is sometimes useful in other scripts as a way
# to extract the key from "wherever". No other operations are performed.
#
# If -n is specified, the metric IDs should be names ("labels") instead
# of internal identifiers. The "metricByLabel()" method from class Numerous()
# will be used to look up the metric, with matchType=STRING (no regexp). If
# there are multiple matches that will be reported (and aborted).
#
# Alternatively -N will look the metric up as a regexp, with matchType=ONE.
# As with -n there must be exactly one match.
#
# Note that metricByLabel() can be expensive, but it is (sometimes) convenient.
#
# If you have a slash ('/') in your metric label and want to use -n/-N you
# will have to specify the '-/' ('--noslash') option to turn off slash
# notation for subIDs. If you need subID parsing AND you have a slash
# in your label, you are hosed. Get the numeric ID and don't use -n/-N
# in that case.
#
# If -w (--write) is specified, SOMETHING will be written. That something is
# either the metric value itself or, with another option:
#
# -+ (--plus) : the metric value will be ADDED to
# FYI: implemented atomically by the NumerousApp server
# -E (--event) : an event is written ... equivalent to naked -w
# -I (--interaction) : an interaction (comment/like/error) will be written
# -P (--photo) : a photo is attached to the metric. value1 MUST
# be a filename. A mimeType will be inferred.
# -A (--permission) : permission structure is written.
# The metricId should be of form "metricId/userId" and the value
# should be a valid JSON permissions dictionary.
# PAY ATTENTION: JSON not PYTHON DICT STRING. This trips people
# up with True vs true (use true)
#
# The userId can be contained in the JSON instead of the / notation;
# if it is in both the / notation one takes precedence. That can be
# useful for duplicating for multiple users from a template.
# -B (--subscriptions) : subscription is written.
# The subscription value should be a valid JSON subscription dictionary.
# Any values you don't specify will be merged with the values from
# your current subscription.
# PAY ATTENTION: JSON not PYTHON DICT STRING. This trips people
# up with True vs true (use true)
# -M (--metric) : a metric is CREATED or UPDATED.
# To create a metric, the name (m1) MUST begin with a '+' which
# will be stripped ("+NewName" becomes "NewName"). The "-n" flag
# is not required (and has no effect in this case). To update a
# metric just specify its metric ID (no '+') or name (-n and no '+')
#
# value1 MUST be present and should be a JSON object; it will
# be sent as the metric data per the API. (Use '{}' for defaults).
# Two other forms are accepted besides JSON:
# a naked value number -- e.g. 17
# the word "private"
# A naked number such as 17 is equivalent to '{ "value" : 17 }'.
# The word private is equivalent to '{ "private" : true }' (and
# value 0 which is implied). The default for private is false. There
# is no shorthand for specifying private AND a non-zero initial value;
# use full on JSON for that.
#
# When updating a metric the current fields will be read and the
# fields you specify will be modified and written (PUT) back. Thus
# for example, you can change just the description of a metric and
# preserve all of its other current parameters. The underlying API
# requires a PUT of the complete new object; this handles that for you.
#
# Note It is an error to specify any value on a metric UPDATE.
# You must use the regular -w method for writing to a metric value.
#
# You can create/update multiple metrics (m2/v2 etc)
#
# Note that you still have to specify -w with -M. There's not really a
# particularly good reason for this; it's just how I did it, simply to
# preserve the rule that -w means m1/v1 pairs in the arguments while
# no -w means the arguments are all metric IDs. Naked -M
# by itself is the same as no options and means just read metrics.
#
# If -y is specified the flag "onlyIfChanged" will be set on the updates.
# This flag is an error if used on anything other than a metric value write.
#
# NOTE: This is NOT atomic at the server as of Oct2014 and the Numerous
# people say they aren't sure they can ever make it so
#
# If you are writing a metric value and you format the "value" like this:
# "EPOCHTIME: mm/dd/yy hh:mm:ss"
# (double quotes are not part of the format; I'm merely
# pointing out the argument must be one "word")
# then the value written to the metric will be the given date converted
# into epochtime (UNIX "seconds since 1970"). This is what you want when
# updating a "timer" type of metric. This syntax also allows one special form:
# "EPOCHTIME: now"
#
# in which case the current system time will be used.
#
#
# If you are writing a metric value and you format the "value" like this:
# "1234 @ mm/dd/yy hh:mm:ss"
# then the value (1234 in this example) will be written with
# the "updated" option (see NumerousMetric.write() API) set to the given time.
#
#
# Without -w:
# -A (--permissions) : permissions will be read
# * if a userId is given via slash notation ('2347293874234/98764782')
# then just that one permission will be read/displayed.
# * otherwise the entire collection will be read/displayed.
#
# -B (--subscriptions) : subscriptions will be read.
# * If no metric IDs are given your entire set of subscriptions
# will be read.
# * As a special case, -Bn and no metric IDs will simply display a list
# of metricID and name. This is useful for finding the metric IDs of all
# the metrics you are subscribed to (have put into your display) in the App.
#
# * Otherwise the subscription parameters on a particular
# metric(s) are read.
#
# -E (--event) : the events will be read.
# Events are value changes.
# You can read a SINGLE event by ID using slash notation:
# -E 7834758745/245235235
# (metric ID and /eventID).
#
# If the eventID conforms to Numerous timestamp syntax, e.g.:
# 2015-02-08T15:27:12.863Z
# then it is used as a "before" key and you will get the one event
# that is at or before the given timestamp.
#
# -I (--interaction) : interactions will be read.
# Interactions are everything other than value changes
#
# -S (--stream) : the stream will be read
# The stream is events and interactions together
#
# -P (--photo) : the URL for the photo will be displayed
#
# -U If no other arguments given, your user info will be read
# With arguments ("m1" values though they aren't metrics) then
# the user info for those ID(s) will be read. Field selection works
# or the entire thing is displayed in JSON.
#
# When writing something, the m1/v1... command line arguments
# should be metric/value pairs. They will be written (plain -w) or
# ADDed (-+) (NumerousAPI ADD action), or sent as Interactions (-I)
# (e.g., comments and such).
#
# When writing values to metrics, the value must simply be a naked number.
# When writing other types (e.g., interactions) the value can (usually should)
# be a JSON. Note that it has to be ONE shell argument so use quotes carefully.
#
# Without -w, if -S or -I is specified the Stream collection or the
# Interactions collection will be read.
#
# Without any -w/-S/-I etc options the specified metrics (m1...) will be read
#
# If you are reading something you can specify which element you want
# displayed using brackets after the ID. So, for example:
# nr 258495834583459[label]
#
# will display the label element of metric 258495834583459 instead
# of its value. For convenience, subdictionaries will also be automatically
# indexed to whatever degree makes sense (if there is ambiguity you will get
# the "first match" however that is defined by many unknown factors). This
# use case is mostly intended for doing things like 258495834583459[web]
# to display the web URL (which itself is inside the "links" dictionary).
#
# When using the [] feature be careful of escaping/quoting/etc.
# This feature does mean that if you foolishly put a '[' in the actual label
# of your metric you can't get at that metric by name with this program.
# (You can still access it by numeric ID of course). I did not bother to
# implement any escape such as [[
#
# It is (of course) far better to use the [] feature to access a particular
# field (e.g., commentBody) of the stream and similar outputs than it is to
# pipe the "human output" to awk '{print $2}' or similar hacks.
#
# Finally, with no operations and no metric IDs (m1 etc) at all, a list
# of metric IDs is read from the server and printed out
#
# If -j is specified, output will be JSON. Else bare. Note that field
# selection doesn't happen on JSON output -- you always get the whole thing.
#
# If "-t limit" is specified, reads on collections will stop at limit
#
# If -D (--debug) is specified, debugging (HTTP in particular) output is on
#
# If -V is specified, a version string is printed (and nothing else is done)
#
# If -q is specified, no "normal" output is generated. Some error cases
# or unhandled exceptions might still cause output, but no server interaction
# that "works" (which includes the server returning a "normal" error) will
# generate output. You *can* specify quiet even where it sort of makes no
# sense (e.g., reading a variable and quiet), perhaps you just want to
# test the exit code (so we allow that combination).
#
# Interactions (comments/likes/errors), photos, and events (value updates) can
# be deleted using the --delete form. There is no "short option"
# (single letter) flavor of --delete on purpose. Specify -I to delete
# an interaction or -E to delete an event, and give the metric ID and item ID
# There is no "val" for --delete -P
#
# Permissions can be deleted (--delete -A) and your subscription
# to a metric can be deleted (--delete -B).
#
#
# Lastly, the metric itself can be deleted but to avoid mistakes with --delete
# causing misery this is done with --killmetric. The syntax is:
#
# nr --killmetric m1 [ m2... ]
#
# and ONLY the flags -D (debug) and/or -q (quiet) are valid. In particular
# you CANNOT kill a metric by name, you must first look up its numeric Id.
#
# OTHER OPTIONS
# --statistics will display various statistics at the end
# --requestlog will display a log of all the requests made to the server
#
# Examples:
#
# WRITE 17 to MyVar and 42 to MyOtherVar:
# nr -w -n MyVar 17 MyOtherVar 42
#
# Credentials come from the environment.
# There will be at least one extra API call to translate the
# variable names into IDs (there could be multiple calls if the
# user has enough variables to make the server segment the replies)
#
# SAME, using a credentials file:
# nr -c ~/.ncred -w -n MyVar 17 MyOtherVar 42
#
# ADD 1 to a given metric:
# nr -w+ 3662358291300702287 1
#
# COMMENT: writes the comment "bozo the clown" to XYZ.
# nr -wI -n XYZ '{ "kind": "comment", "commentBody": "bozo the clown" }'
#
# COMMENT: simple case
# nr -wI -n XYZ 'bozo the clown'
#
# as a special case hack a "naked" interaction (not a JSON)
# will be taken as a comment. So this is identical to previous example.
#
# You can use -I to send any valid type of interaction. So, for example:
#
# ERROR:
# nr -wI 53349538495834 '{ "kind": "error", "commentBody": "error stuff" }'
# (note that numerous uses commentBody for error details too)
#
# LIKE:
# nr -wI 53349538495834 '{ "kind": "like" }'
#
# CREATE: create a metric
# nr -wM +NewM1 '{}' +NewM2 '{ "value" : 17, "private" : true }'
#
# creates NewM1 with default values and NewM2 with initial
# value 17 and private (the API default for private is false)
# Outputs the ID of the created metric (or -1 if error)
#
# UPDATE: set a metric's description
# nr -wM -n XYZ '{ "description": "i am a little teapot" }'
#
# DISPLAY INTERACTIONS:
# nr -Ij -n MyVar
#
# Display the interactions collection in JSON
#
# DISPLAY the 100 most recent Stream items:
# nr -S -t 100 -n MyVar
#
# USING FIELDS: Translate SomeName into its metric id:
# nr -n 'SomeName[id]'
#
# will display the numeric metric ID for SomeName
#
# GETTING THE WEB ADDRESS OF A METRIC
# nr '34552988823401[web]'
#
# would display the web URL of the given metric. Note that this
# uses the "find any key match anywhere" feature because the
# field called "web" is actually in a subdictionary of the "links"
#
# USING FIELDS WITH EVENTS
#      nr -E --limit 1 -n 'SomeName[value]'
#
# is a silly way to get at the current value of SomeName using the
# fields feature and the limit feature.
#
# DELETE AN EVENT (id 23420983483332)
# nr --delete -E 34552988823401 23420983483332
#
# PERMISSIONS:
# NOTE: Permissions only have effect if the metric "visibility"
# attribute is set to "private".
#
# Give user 66178735 read permission on metric 34552988823401:
#     nr -wA 34552988823401 '{ "userId" : "66178735", "readMetric" : true }'
#
# Same using slash notation for userId:
# nr -wA 34552988823401/66178735 '{ "readMetric" : true }'
#
# Display all the permissions for a metric:
# nr -A 34552988823401
#
# Display user 66178735's permissions:
# nr -A 34552988823401/66178735
#
# Display user 66178735's read permission specifically:
# nr -A '34552988823401/66178735[readMetric]'
#
# Delete user 66178735's permissions on a metric:
# nr -A --delete 34552988823401 66178735
#
# This form allows deleting ALL permissions on a metric (be careful!):
# nr -A --delete 34552988823401 '!ALL!'
#
parser = argparse.ArgumentParser()
parser.add_argument('-V', '--version', action="store_true", help="display version info and exit")
parser.add_argument('-/', '--noslash', action="store_true", help="disable slash parsing. Can be useful with -n/-N if metric label has a slash in it.")
parser.add_argument('-c', '--credspec', help="file name containing API key or the key itself")
parser.add_argument('-j', '--json', action="store_true", help="JSON output format")
parser.add_argument('-n', '--name', action="store_true", help="search metric by label using string match")
parser.add_argument('-N', '--regexp', action="store_true", help="search metric by label using unanchored regexp")
parser.add_argument('-q', '--quiet', action="store_true")
parser.add_argument('-w', '--write', action="store_true")
parser.add_argument('-t', '--limit', type=int, default=-1)
parser.add_argument('-D', '--debug', action="count", default=0, help="turn on debug output. Use twice for even more output")
parser.add_argument('--delete', action="store_true")
parser.add_argument('-U', '--user', action="store_true")
parser.add_argument('--statistics', action="store_true", help="show statistics from numerous class")
parser.add_argument('-R', '--ratelimits', action="count", default=0, help="display rate limit info. Use -RR to ONLY do that (no other processing)")
parser.add_argument('--ensurerate', type=int, default=0, help="delay if necessary for sufficient API rate limit.") # use with -R
argx=parser.add_mutually_exclusive_group()
# these are mutually exclusive because both use throttle overrides
# (could have allowed both by being more clever, but why)
argx.add_argument('--retry500', action="store_true", help="XXX automatically retry if server returns error 500. XXX This should probably be eliminated now.")
argx.add_argument('--requestlog', action="store_true", help="show complete log of requests made to server") # show all API calls
wgx = parser.add_mutually_exclusive_group()
wgx.add_argument('-+', '--plus', action="store_true")
wgx.add_argument('-E', '--event', action="store_true")
wgx.add_argument('-A', '--permissions', dest="perms", action="store_true")
wgx.add_argument('-B', '--subscriptions', dest="subs", action="store_true")
wgx.add_argument('-I', '--interaction', action="store_true")
wgx.add_argument('-S', '--stream', action="store_true")
wgx.add_argument('-M', '--metric', action="store_true")
wgx.add_argument('-P', '--photo', action="store_true")
wgx.add_argument('-y', '--onlyIf', action="store_true")
wgx.add_argument('-k', '--key', action="store_true", help="Just report API key (i.e., from env or file)")
wgx.add_argument('--killmetric', action="store_true", help="Permanently delete metric. Requires numeric ID")
parser.add_argument('keyvals', nargs='*', metavar='key [value]')
args = parser.parse_args()
#
# clean up / sanitize some of the argument semantics
#
if args.version:
print(Numerous(None).agentString)
exit(1)
# regexp implies name
if args.regexp:
args.name = True
# many operations never take subIDs so turn on noslash for you automatically
# for those. Basically slash fields only happen for event/interaction/perms
if not (args.event or args.perms or args.interaction):
args.noslash = True
# --delete is exclusive with MUCH of the 'wgx' exclusive
# group but not all
# ... so couldn't use built-in exclusion features
# ... could have just ignored, but it seems best to make sure that what
# you specified makes total sense (especially before deleting something)
#
# and it requires --event, --interaction, --subscriptions, --permissions, or --photo
#
if args.delete:
nope = { "write", "plus", "stream", "metric", "onlyIf", "user",
"key", "killmetric" }
musthaveone = { "event", "interaction", "photo", "subs", "perms" }
argsDict = vars(args)
bad = False
for x in nope:
if argsDict.get(x):
print("Can't have --delete and --" + x)
bad = True
gotOne = None
for x in musthaveone:
if argsDict.get(x):
if gotOne:
print("Can only have one of {}, {} with --delete".format(x, gotOne))
bad = True
gotOne = x
if not gotOne:
print("--delete requires one of: {}".format(musthaveone))
bad = True
if bad:
exit(1)
# --user has similar (but not quite the same) exclusion rules
if args.user:
nope = { "plus", "stream", "metric", "onlyIf", "event", "interaction",
"key", "subs:subscriptions", "killmetric", "name",
"perms:permissions" }
argsDict = vars(args)
bad = False
for xt in nope:
x = xt.split(':')
k = x[0]
try:
optname = x[1]
except:
optname = k
if argsDict.get(k):
print("Can't have --user and --" + optname)
bad = True
if bad:
exit(1)
if args.write and not args.photo:
print("--write requires -P/--photo")
print("(no other form of user update is implemented yet)")
exit(1)
#
# we do not allow you to kill a metric by name. It's just too error prone
#
if args.killmetric and args.name:
print("--killmetric ONLY works by metricId, not by name. No -n allowed.")
exit(1)
#
# As a shortcut we allow naked -+ to mean -w+
#
if args.plus:
args.write = True
#
# limit of -1 means infinite and I think it's cleaner to use None in code
#
if args.limit == -1:
args.limit = None
#
# writing a user photo is a special case -- exactly one argument
#
if args.write and args.user and args.photo and len(args.keyvals) != 1:
print("User photo update requires exactly one file name argument")
exit(1)
if args.write and (len(args.keyvals) % 2) != 0 and not args.user:
print("Write/update specified but arg list is not metric/value pairs")
exit(1)
if args.write and len(args.keyvals) == 0:
print("Write/update specified but no metric/value pairs given")
exit(1)
#
# -y only makes sense if writing/adding
#
if args.onlyIf and not args.write:
print("-y/--onlyIf only valid when writing a metric with -w (--write)")
exit(1)
#
# Can't have any subfields if writing or deleting
#
if args.write or args.delete:
for m in args.keyvals[0::2]:
if '[' in m:
print("Can only use [field] notation for reading:", m)
exit(1)
# this convenience function implements the "it can come from almost anywhere" thing
k = numerousKey(args.credspec)
if args.key:
# this is a hack way to just extract the API key from "wherever"
# honestly it probably should be a separate program but here we are
if k:
print(k)
exit(0)
else:
print("No API Key")
exit(1)
# if we've been asked to report on rate limits then just do that first
# and there is no throttling (because we don't want to be throttled while
# reporting about throttling lol)
#
# Note that if you just gave us an ensure we'll do that silently before doing
# the rest of the requests you have specified
#
if args.ratelimits > 0 or args.ensurerate > 0:
bequiet = args.quiet or (args.ratelimits == 0)
remain = -1
refresh = -1
nrRaw = nrServer = Numerous(apiKey=k, throttle=lambda nr,tp,td,up: False)
try:
nrRaw.ping()
remain=nrRaw.statistics['rate-remaining']
refresh=nrRaw.statistics['rate-reset']
except NumerousError as x:
if x.code == 429: # we are in the Too Many condition already
remain = 0 # report that as zero
refresh = nrRaw.statistics['rate-reset']
elif x.code == 401: # make a nice error output with unauthorized
print("Server says: {}. Check -c or NUMEROUSAPIKEY environment.".format(x.reason))
exit(1)
else:
raise # anything else, not sure what is going on, reraise it
if args.ensurerate:
t = refresh + 1 # +1 is a just-to-be-sure thing
if remain >= args.ensurerate:
msg = "No delay needed; have {} operations left.".format(remain)
t = 0
else:
msg = "Delaying {} seconds; only have {} APIs left.".format(t, remain)
if not bequiet:
print(msg)
if t > 0:
time.sleep(t)
elif not bequiet:
print("Remaining APIs: {}. New allocation in {} seconds.".format(remain,refresh))
if args.ratelimits > 1: # -RR means just do this then exit
exit(0)
# this throttle function implements retries on HTTP errors 500/504
# It's not usually specified; but can be useful if the server is being buggy
# NOTE: We only retry "guaranteed idempotent" requests which are GETs.
# There are some idempotent requests that are still not retried this way.
# C'est la vie. The server isn't really supposed to return 500/504 :)
#
def throttleRetryServerErrors(nr, tp, td, up):
if tp['result-code'] in (500, 504) and tp['request']['http-method'] == 'GET':
nr.statistics['throttleRetryServerErrors'] += 1
tp['result-code'] = 429 # make this look like a Too Many
tp['rate-reset'] = 15 # wait at least 15s, possibly more w/backoff
return up[0](nr, tp, up[1], up[2])
# used to keep log of all API requests made
def throttle_log_requests(nr, tparams, td, up):
td.append((tparams['request'], tparams['result-code']))
return up[0](nr, tparams, up[1], up[2])
tf = None
td = None
if args.retry500:
tf = throttleRetryServerErrors
elif args.requestlog:
log_of_all_requests = []
td = log_of_all_requests
tf = throttle_log_requests
nrServer = Numerous(apiKey=k, throttle=tf, throttleData=td)
# if we've been asked to report server statistics, enhance the
# timestamp reporting to report an array of the last 10 response times
# (seed the array in the statistics variable; see the class implementation)
if args.statistics:
nrServer.statistics['serverResponseTimes'] = [0]*10
if args.debug:
if args.debug > 1:
nrServer.debug(10) # lol, but 10 turns on extra debug
else:
nrServer.debug(1) # standard debug level
# test connectivity ... mostly to verify creds
try:
nrServer.ping()
except NumerousAuthError:
print("Authorization failed. Likely cause is bad credentials (API key)")
exit(1)
except NumerousError as x:
print("Server error: {} {} ({})".format(x.code, x.reason, x.details))
exit(1)
#
# This function takes a string that *MIGHT* be a numeric value and
# converts it to a number, or just returns it. This is used for two reasons:
#
# 1) The Numerous server insists (quite reasonably so) that numeric values
# come across as JSON numbers (17), not as strings ("17"). It turns out
# the server only enforces that on some APIs and not others; but
# we choose to be conservative in what we send regardless.
#
# 2) Implement syntax for certain special circumstances:
# * value@timestamp for setting "updated" times in write()s
# * EPOCHTIME: mm/dd/yy hh:mm:ss -- for "timer" metrics
# * EPOCHTIME: now -- for "timer" metrics
#
def valueParser(s):
EpochTimeSyntax = "EPOCHTIME:"
rval = s # default result is just the source
try:
ts = None
x = s.split('@')
s = x[0]
if len(x) == 2:
ts = "EPOCHTIME:" + x[1]
elif s.startswith(EpochTimeSyntax):
ts = s
x = None
except AttributeError:
x = [ s ]
tval = -1
if ts: # we have an EPOCHTIME: or an '@' form
sx = ts[len(EpochTimeSyntax):].strip() # the rest of s
# these are all the formats we'll try for converting a date stamp
# personally I don't recommend you use the ones omitting the full
# time but it's up to you
dateformats = [
# NOTE: ADD NEW ONES AT END; the [0]'th is used below explicitly
"%m/%d/%Y %H:%M:%S", # four digit year
"%m/%d/%y %H:%M:%S", # two digit year
"%m/%d/%Y %H:%M", # seconds omitted (yyyy)
"%m/%d/%y %H:%M", # (yy)
"%m/%d/%Y", # time completely omitted (yyyy)
"%m/%d/%y",
"%Y-%m-%dT%H:%M:%S", # close to the Numerous format
"%Y-%m-%d" # just the date part of Numerous fmt
]
# kinda hokey, but easy
if sx == "now":
sx = time.strftime(dateformats[0])
# try first mm/dd/yyyy then mm/dd/yy .. could add more formats too
for fmt in dateformats:
try:
tval = float(time.strftime("%s", time.strptime(sx, fmt)))
break
except:
pass
# if we get all the way to here without parsing
# throw an exception because of that.
if tval < 0:
raise ValueError
# At this point tval is < 0 if no timestamp syntax was used, or else
# tval is the timestamp value and there may or may not still be a regular
# value to parse in x[0]
if x:
s = x[0]
try:
if len(s) > 0:
# is it just a naked integer?
try:
rval = int(s)
except ValueError:
# is it just a naked float?
try:
rval = float(s)
except ValueError:
rval = s
except TypeError:
rval = s
else:
rval = tval
tval = -1
return (rval, tval)
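# Illustrative results from valueParser() -- not part of the original; <epoch>
# stands in for whatever the local strptime/strftime conversion yields:
#   valueParser("17")                    -> (17, -1)       plain integer value
#   valueParser("3.5")                   -> (3.5, -1)      plain float value
#   valueParser("EPOCHTIME: now")        -> (<epoch>, -1)  the epochtime IS the value
#   valueParser("12@01/02/15 03:04:05")  -> (12, <epoch>)  value plus "updated" time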
#
# support function for the metric[field] concept
# Given a dictionary (usually a Numerous result) and a field name that
# is (supposedly) either in that dictionary OR in a subdictionary,
# return the field
#
def findSomethingSomewhere(d, f):
# this could be duck typed but we do check for dict explicitly
# otherwise the expression d[k] risks indexing a string element or
# other iterable that isn't a dictionary. The whole thing is a bit
# hokey but it is a damn convenient feature to be able to say
# MetricID[web]
# to get at MetricID[links][web]
#
# Keep in mind this is all just a command line shell utility
# so convenience trumps some other considerations
if type(d) is not dict:
return None
elif f in d:
return (d[f])
else:
for k in d:
subdict = d[k]
x = findSomethingSomewhere(subdict, f)
if x:
return x
return None
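# Illustrative sketch (hypothetical dictionary shaped like a metric read result):
#   findSomethingSomewhere({'id': '123', 'links': {'web': 'http://n.example/m/123'}}, 'web')
# returns 'http://n.example/m/123' even though 'web' lives inside the nested
# 'links' subdictionary rather than at the top level.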
#
# Get all (or up to limit) of a user's metrics
#
def getMetrics(nr, limit=None):
n = 0
metrics = []
for m in nr.metrics():
if limit and n == limit:
break
metrics.append(m)
n = n + 1
return metrics
def printStreamResults(items, fld):
if type(items) == str:
print(items) # these are error messages
else:
for i in items:
if fld:
print(i.get(fld,None))
else:
c = i.get('commentBody', None)
a = i.get('authorId', None)
v = i.get('value', None)
sID = i.get('id', '??? NO ID ???')
print(i['kind'], sID, v, i['updated'], a, "--", c)
def printEventResults(r, fld):
if type(r) == str:
print(r) # these are error messages
else:
for i in r:
if fld:
print(i.get(fld,None))
else:
# the initial value when a metric is created
# does not have an authorId (is this a server bug?)
# so we need to be careful...
a = i.get('authorId', 'INITIAL-CREATION-VALUE')
print(i['value'],"@",i['updated'],"by",a,"id",i['id'])
def printPerms(r, fld):
if type(r) == str:
print(r) # these are error messages
else:
for i in r:
if fld:
print(i.get(fld,None))
else:
s = i['userId'] + " on " + i['metricId'] + ": "
for k in [ 'readMetric', 'updateValue', 'editPermissions', 'editMetric' ]:
if i.get(k, False):
s += k
s += " "
print(s)
def printDeleteResults(r):
print("%s/%s -- %s" %(r['ID'], r['delID'], r['result']))
def getIterableStuff(m, i, limit):
n = 0
list = []
for x in i:
if limit and n == limit:
break
n = n + 1
list.append(x)
return list
# write an image file to either a metric or to the user record
def doPhotoWrite(metricOrNR, imageFName):
try:
f = open(imageFName, "rb")
except IOError:
return "cannot open: " + imageFName
mimeGuess = [ ( '.jpg', 'image/jpeg' ),
( '.jpeg', 'image/jpeg' ),
                  ( '.gif', 'image/gif' ),
                  ( '.png', 'image/png' ) ]
mType = None
for m in mimeGuess:
if imageFName.endswith(m[0]):
mType = m[1]
break
if not mType:
mType = 'image/jpeg' # hope for the best
try:
return metricOrNR.userPhoto(f, mType)
except AttributeError:
return metricOrNR.photo(f, mType)
def mainCommandProcessing(nr, args):
# XXX it is important that these keys cannot appear in a base36 encoding
# Wow that was an obscure bug, when this was used with this mspec:
# http://n.numerousapp.com/m/1bzm892hvg4id
# that happened to include 'id' (the previous key used here)
# and that false-positived an "if 'id' in mspec" test
#
# The whole way all this works is a hack that obviously grew over time :)
#
mspecIDKey = '_*id*_'
mspecID2Key = '_*id2*_'
mspecFIELDKey = '_*field*_'
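    # Illustrative sketch of what these keys end up holding (hypothetical IDs):
    # an argument such as "34552988823401/66178735[readMetric]" is parsed below
    # into { mspecIDKey: '34552988823401',
    #        mspecID2Key: '66178735',
    #        mspecFIELDKey: 'readMetric' }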
#
# Sometimes it is m1 v1 pairs sometimes just metrics
#
if args.write or (args.delete and not (args.photo or args.subs)):
xm = args.keyvals[0::2]
values = args.keyvals[1::2]
else:
xm = args.keyvals
# parse any field and ID2 specifications:
metrics = []
for m in xm:
id = m
id2 = None
fld = None
# don't even do any mods if killmetric (paranoid/careful)
if not args.killmetric:
# first split off any field definition
if '[' in id:
x = id.split('[')
# very rudimentary syntax checks
if len(x) != 2 or not x[1].endswith(']'):
print("bad metric specification", m)
exit(1)
# nuke the trailing ']' on the field spec
fld = x[1][:-1]
id = x[0]
# Check for slash notation unless told not to.
if (not args.noslash) and '/' in id:
x = id.split('/')
if len(x) == 2:
id2 = x[1]
id = x[0]
if id2 or fld:
m = { mspecIDKey : id }
if id2:
m[ mspecID2Key ] = id2
if fld:
m[ mspecFIELDKey ] = fld
metrics.append(m)
#
# If we're doing this by name, translate the IDs first
#
resultList = []
exitStatus = 0
#
# If no keys in the argument list then:
# If doing subscriptions, get the top-level subscriptions list
# Otherwise get the list of all keys
#
# Otherwise read or write the values. In either case accumulate
# the resultList for subsequent output
#
if len(metrics) == 0:
if args.subs:
for s in nr.subscriptions():
# this is a hack but if you specify -n we just display
# the metric ID and the name this way
if not args.quiet: # quiet is dumb, but whatever
if args.name:
id = s['metricId']
print("{} {}".format(id, nr.metric(id).label()))
else:
print(s)
print(" ")
elif args.user:
u = nr.user()
if not args.quiet:
if args.json:
print(json.dumps(u))
else:
print("User: {userName} [ {fullName} ], id: {id}".format(**u))
else:
vlist = getMetrics(nr, args.limit)
# arguably "quiet" is dumb, but it does test connectivity
if not args.quiet:
for v in vlist:
if args.json:
print(json.dumps(v))
elif args.name:
print(v['id'] + " " + v['label'])
else:
print(v['id'])
elif args.user and args.write and args.photo:
v = doPhotoWrite(nr, args.keyvals[0])
print (v)
else:
while len(metrics):
mspec = metrics.pop(0)
if mspecIDKey in mspec:
r = { 'ID' : mspec[mspecIDKey] }
if mspecFIELDKey in mspec:
r['FIELD'] = mspec[mspecFIELDKey]
if mspecID2Key in mspec:
r['ID2'] = mspec[mspecID2Key]
else:
r = { 'ID' : mspec }
# if we are creating a new metric, don't make a NumerousMetric from ID
creatingNew = (args.write and args.metric and r['ID'][0] == '+')
invalidMetric = False
if creatingNew:
r['ID'] = r['ID'][1:] # strip the plus
metric = None
else:
metric = None
if args.name:
if args.regexp:
mtype = 'ONE'
else:
mtype = 'STRING'
s = r['ID']
try:
metric = nr.metricByLabel(s, matchType=mtype)
except NumerousMetricConflictError as e:
print("More than one match: ", e.details)
metric = None
if not metric:
metric = nr.metric(r['ID'])
# this helps humans figure out they have screwed up
# if we were doing name translation see if the metric translated
# Only do this when args.name because it's extra overhead so we
            # don't do it when you're more likely to be a script (because
# you used the lower level metric ID directly)
if args.name and not metric.validate():
invalidMetric = True
if invalidMetric:
r['result'] = "ERROR / Invalid Metric: " + r['ID']
exitStatus = 1
elif args.delete:
if args.photo:
delWhat = None
r['delID'] = "photo"
elif args.subs:
delWhat = None
r['delID'] = "subscription"
else:
delWhat = values.pop(0)
r['delID'] = delWhat
try:
if args.event:
metric.eventDelete(delWhat)
elif args.interaction:
metric.interactionDelete(delWhat)
elif args.photo:
metric.photoDelete()
elif args.subs:
# "deleting" a subscription means turning off
                    # all notifications, which we do in a somewhat generalized way:
s = metric.subscription()
for k in s.keys():
if k.startswith('notif') and s[k] == True:
s[k] = False
metric.subscribe(s)
elif args.perms:
if delWhat == '!ALL!':
for x in metric.permissions():
metric.delete_permission(x['userId'])
else:
metric.delete_permission(delWhat)
else: # never happens
raise ValueError # should not happen
r['result'] = " Deleted"
except NumerousError as v:
exitStatus = 1
r['result'] = "ERROR / Not Found (" + v.reason + ")"
elif args.write and args.photo:
# the matching value given is (should be) a file name
r['result'] = doPhotoWrite(metric, values.pop(0))
elif args.write:
val = values.pop(0)
# sometimes val is a JSON and sometimes it is naked
# to simplify the rest of this turn it into something
# that is ALWAYS a dictionary, but if it was naked we
# put the "val" in as '__naked__' key
naked = '__naked__'
try:
jval = json.loads(val)
# this test serves two purposes: see if it is dict-like,
# and protect our __naked__ hack
if naked in jval:
# seriously, you are a twit...
print("Invalid Numerous JSON given: ", val)
exit(1)
except (TypeError, ValueError):
# it was naked, or malformed.
try:
jval = { naked : valueParser(val) }
except ValueError: # e.g., "EPOCHTIME: " bad format
jval = { naked : val } # this will get dealt with below
if args.perms:
if naked in jval:
r['result'] = "Permissions must be JSON format: " + val
exitStatus = 1
else:
u = r.get('ID2', None)
r['result'] = metric.set_permission(jval, userId=u)
elif args.subs:
# you write a subscription as json updated parms.
# Nudity is not allowed.
if naked in jval:
r['result'] = "Subscriptions must be JSON format: " + val
exitStatus = 1
else:
r['result'] = metric.subscribe(jval)
elif args.interaction:
# interactions are comments/likes/errors
# if you specify a naked string it's a comment
# you specify the other forms (or comments if you like)
# as a JSON. Figure out what's going on ... then do it
if naked in jval:
j = { 'kind': "comment", 'commentBody' : val }
else:
j = jval
if j['kind'] == "comment":
metric.comment(j['commentBody'])
elif j['kind'] == "error":
metric.sendError(j['commentBody'])
elif j['kind'] == "like":
metric.like()
r['result'] = "OK"
elif args.metric and not creatingNew:
# this is the metric update case (but not create)
# NOTE: This is for metric attributes (description etc)
# you cannot update the value parameter this way
# (server will ignore any 'value' in the json)
# We don't implement any naked shortcut; val MUST be JSON
if naked in jval:
r['result'] = "Update requires JSON for parameters"
exitStatus = 1
else:
r['result'] = metric.update(jval)
elif creatingNew:
# if you specified it naked, it's just the value or "private"
if naked in jval:
vp = jval.pop(naked)
if vp[0] == "private":
jval['private'] = True
jval['value'] = 0 # this is implied by API anyway
else:
jval['value'] = vp[0]
elif 'value' in jval:
# allow for EPOCHTIME: in value here
jval['value'] = valueParser(jval['value'])[0]
metric = nr.createMetric(r['ID'], attrs=jval)
if args.json:
r['result'] = metric.read(dictionary=True)
else:
r['result'] = metric.id
else:
# we are writing a metric value
try:
x = valueParser(val)
val = x[0]
if x[1] < 0:
tval = None
else:
                        dt = datetime.datetime.utcfromtimestamp(x[1])  # epoch is absolute; render as UTC to match the 'Z' suffix
tval = dt.strftime('%Y-%m-%dT%H:%M:%S.000Z')
try:
r['result'] = metric.write(val,
onlyIf = args.onlyIf,
add = args.plus,
dictionary = args.json,
updated=tval)
except NumerousMetricConflictError as e:
# it's a bit of a hack but if you asked for quiet
# then this "error" (which isn't really an error)
# is ignored as far as exitStatus goes, because
# you can't tell the difference between this and
# a "real" error when quiet. Could have used
# exit status 2 for this I suppose.
exitStatus = 0 if args.quiet else 1
if args.json:
r['result'] = { 'errorCode' : e.code,
'errorDetails' : e.details,
'errorReason' : e.reason }
else:
r['result'] = "NoChange"
except ValueError:
exitStatus = 1
r['result'] = "Bad value syntax: '{}'".format(val)
elif args.killmetric:
try:
metric.crushKillDestroy()
r['result'] = r['ID'] + " Deleted"
except NumerousError as e:
r['result'] = r['ID'] + " delete FAILED " + e.reason
elif args.interaction:
if 'ID2' in r:
r['result'] = [ metric.interaction(r['ID2']) ]
else:
iterable = metric.interactions()
r['result'] = getIterableStuff(metric, iterable, args.limit)
elif args.perms:
if 'ID2' in r:
r['result'] = [ metric.get_permission(r['ID2']) ]
else:
iterable = metric.permissions()
r['result'] = getIterableStuff(metric, iterable, args.limit)
elif args.stream:
# no support for reading a single stream item
# (read a single item using the interaction/event interfaces)
iterable = metric.stream()
r['result'] = getIterableStuff(metric, iterable, args.limit)
elif args.event:
if 'ID2' in r:
# ID2 can either be a naked eventID or a timestamp
id2 = r['ID2']
if 'T' in id2 and id2[-1] == 'Z':
r['result'] = [ metric.event(before=id2) ]
else:
r['result'] = [ metric.event(evID=id2) ]
else:
iterable = metric.events()
r['result'] = getIterableStuff(metric, iterable, args.limit)
elif args.photo:
r['result'] = metric.photoURL()
elif args.user:
u = nr.user(r['ID'])
if 'FIELD' in r:
r['result'] = u[r['FIELD']]
else:
r['result'] = u
elif args.subs:
try:
# metricID[+] means get all the subscriptions for the metric
if mspecFIELDKey in mspec and mspec[mspecFIELDKey] == '+':
slist = []
for s in metric.subscriptions():
slist.append(s)
r['result'] = slist
else:
d = metric.subscription()
if args.json:
r['result'] = d
elif mspecFIELDKey in mspec:
r['result'] = findSomethingSomewhere(d, mspec[mspecFIELDKey])
else:
r['result'] = d
except NumerousError as e:
exitStatus = 1
if args.json:
r['result'] = { "NumerousError" : { "code" : e.code, "reason" : e.reason }}
else:
r['result'] = "Error: " + e.reason
else:
try:
# always read the full dictionary... and use the entire
# result if args.json, otherwise use any field value given or
# in the simple case just the value
d = metric.read(dictionary = True)
if args.json:
r['result'] = d
elif mspecFIELDKey in mspec:
r['result'] = findSomethingSomewhere(d, mspec[mspecFIELDKey])
else:
r['result'] = d['value']
except NumerousError as e:
exitStatus = 1
if args.json:
r['result'] = { "NumerousError" : { "code" : e.code, "reason" : e.reason }}
elif e.code == 403:
r['result'] = "No read permission on this metric"
else:
r['result'] = "Error: " + e.reason
resultList.append(r)
#
# display results accordingly
#
if not args.quiet:
if args.json:
j = { 'Results' : resultList }
print(json.dumps(j))
else:
for r in resultList:
rslt = r['result']
fld = r.get('FIELD',None)
if args.delete:
printDeleteResults(r)
elif args.write:
print(rslt)
elif args.interaction or args.stream:
printStreamResults(rslt, fld)
elif args.event:
printEventResults(rslt, fld)
elif args.perms:
printPerms(rslt, fld)
else:
print(rslt) # likely python dict output (use -j for JSON)
if args.statistics:
print("Statistics for {}:".format(nr))
for k in nr.statistics:
print("{:>24s}: {}".format(k, nr.statistics[k]))
if args.requestlog:
for rx in log_of_all_requests:
rq = rx[0]
print("{} {}".format(rq['http-method'], rq['url']))
if rq['jdict']:
print(" additional param dictionary: ", rq['jdict'])
print(" --> {}".format(rx[1]))
return exitStatus
try:
xstat = mainCommandProcessing(nrServer, args)
except NumerousError as x:
print("Server error: {} {} ({})".format(x.code, x.reason, x.details))
xstat = 1
exit(xstat)
| {
"content_hash": "2f5a0221e884ddbec51e58cf18262780",
"timestamp": "",
"source": "github",
"line_count": 1383,
"max_line_length": 157,
"avg_line_length": 38.417208966015906,
"alnum_prop": 0.553725696862472,
"repo_name": "outofmbufs/Nappy",
"id": "da0dd29ede9a1355eb1f4fed1420713d94e3cfed",
"size": "53155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shell-cmd/nr.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "103022"
},
{
"name": "Python",
"bytes": "152743"
},
{
"name": "Shell",
"bytes": "56543"
}
],
"symlink_target": ""
} |
from kubernetes_py.models.v1.PodTemplateSpec import PodTemplateSpec
from kubernetes_py.models.v1beta1.DeploymentStrategy import DeploymentStrategy
from kubernetes_py.models.v1beta1.LabelSelector import LabelSelector
from kubernetes_py.models.v1beta1.RollbackConfig import RollbackConfig
class DeploymentSpec(object):
"""
http://kubernetes.io/docs/api-reference/extensions/v1beta1/definitions/#_v1beta1_deploymentspec
"""
def __init__(self, model=None):
super(DeploymentSpec, self).__init__()
self._replicas = 1
self._selector = LabelSelector()
self._template = PodTemplateSpec()
self._strategy = DeploymentStrategy()
self._min_ready_seconds = 0
self._revision_history_limit = None
self._paused = False
self._rollback_to = None
if model is not None:
self._build_with_model(model)
def _build_with_model(self, model=None):
if "replicas" in model:
self.replicas = model["replicas"]
if "selector" in model:
self.selector = LabelSelector(model["selector"])
if "template" in model:
self.template = PodTemplateSpec(model["template"])
if "strategy" in model:
self.strategy = DeploymentStrategy(model["strategy"])
if "minReadySeconds" in model:
self.min_ready_seconds = model["minReadySeconds"]
if "revisionHistoryLimit" in model:
self.revision_history_limit = model["revisionHistoryLimit"]
if "paused" in model:
self.paused = model["paused"]
if "rollbackTo" in model:
self.rollback_to = model["rollbackTo"]
# ------------------------------------------------------------------------------------- replicas
@property
def replicas(self):
return self._replicas
@replicas.setter
def replicas(self, reps=None):
if not isinstance(reps, int):
raise SyntaxError("DeploymentSpec: replicas: [ {} ] is invalid".format(reps))
self._replicas = reps
# ------------------------------------------------------------------------------------- selector
@property
def selector(self):
return self._selector
@selector.setter
def selector(self, sel=None):
if not isinstance(sel, LabelSelector):
raise SyntaxError("DeploymentSpec: selector: [ {} ] is invalid".format(sel))
self._selector = sel
# ------------------------------------------------------------------------------------- template
@property
def template(self):
return self._template
@template.setter
def template(self, t=None):
if not isinstance(t, PodTemplateSpec):
raise SyntaxError("DeploymentSpec: template: [ {} ] is invalid".format(t))
self._template = t
# ------------------------------------------------------------------------------------- strategy
@property
def strategy(self):
return self._strategy
@strategy.setter
def strategy(self, strat=None):
if not isinstance(strat, DeploymentStrategy):
raise SyntaxError("DeploymentSpec: strategy: [ {} ] is invalid".format(strat))
self._strategy = strat
# ------------------------------------------------------------------------------------- minReadySeconds
@property
def min_ready_seconds(self):
return self._min_ready_seconds
@min_ready_seconds.setter
def min_ready_seconds(self, mrs=None):
if not isinstance(mrs, int):
raise SyntaxError("DeploymentSpec: min_ready_seconds: [ {} ] is invalid".format(mrs))
self._min_ready_seconds = mrs
# ------------------------------------------------------------------------------------- revisionHistoryLimit
@property
def revision_history_limit(self):
return self._revision_history_limit
@revision_history_limit.setter
def revision_history_limit(self, rhl=None):
if not isinstance(rhl, int):
raise SyntaxError("DeploymentSpec: revision_history_limit: [ {} ] is invalid".format(rhl))
self._revision_history_limit = rhl
# ------------------------------------------------------------------------------------- paused
@property
def paused(self):
return self._paused
@paused.setter
def paused(self, p=None):
if not isinstance(p, bool):
raise SyntaxError("DeploymentSpec: paused: [ {} ] is invalid".format(p))
self._paused = p
# ------------------------------------------------------------------------------------- rollbackTo
@property
def rollback_to(self):
return self._rollback_to
@rollback_to.setter
def rollback_to(self, rc=None):
if not isinstance(rc, RollbackConfig):
raise SyntaxError("DeploymentSpec: rollback_to: [ {} ] is invalid".format(rc))
self._rollback_to = rc
# ------------------------------------------------------------------------------------- serialize
def serialize(self):
data = {}
if self.replicas is not None:
data["replicas"] = self.replicas
if self.selector is not None:
data["selector"] = self.selector.serialize()
if self.template is not None:
data["template"] = self.template.serialize()
if self.strategy is not None:
data["strategy"] = self.strategy.serialize()
if self.min_ready_seconds is not None:
data["minReadySeconds"] = self.min_ready_seconds
if self.revision_history_limit is not None:
data["revisionHistoryLimit"] = self.revision_history_limit
if self.paused is not None:
data["paused"] = self.paused
if self.rollback_to is not None:
data["rollbackTo"] = self.rollback_to.serialize()
return data
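# Minimal illustrative usage sketch, not part of the original module: build a spec
# from a plain dict (as the Deployment model would) and round-trip it through
# serialize(). The field values are arbitrary examples; the nested defaults rely on
# LabelSelector/PodTemplateSpec/DeploymentStrategy serializing cleanly when untouched.
if __name__ == "__main__":
    _example = DeploymentSpec(model={"replicas": 3, "minReadySeconds": 10, "paused": False})
    assert _example.serialize()["replicas"] == 3
    print(_example.serialize())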
| {
"content_hash": "cfdc0908b956f6cc24504268386acc99",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 112,
"avg_line_length": 36.53416149068323,
"alnum_prop": 0.5423325399523972,
"repo_name": "froch/kubernetes-py",
"id": "ff7870c228d5c5d797aa33e2a8fa630be708ccb4",
"size": "6060",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "kubernetes_py/models/v1beta1/DeploymentSpec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1042823"
},
{
"name": "Shell",
"bytes": "423"
}
],
"symlink_target": ""
} |
from calvin.actor.actor import Actor, manage, condition, stateguard
class Deselect(Actor):
"""
Route token from 'case_true' or 'case_false' to 'data' port depending on 'select'
Deselect assumes 'false' or 'true' as input, values outside that
range will default to 'case_false'.
Inputs:
case_false : Token to output 'data' if select token is 'false'
case_true : Token to output 'data' if select token is 'true'
select : Select which inport will propagate to 'data' port
Outputs:
data : Token from 'case_true' or 'case_false' port
"""
@manage(['select'])
def init(self):
self.select = None
@stateguard(lambda self: self.select is None)
@condition(['select'], [])
def select_action(self, select):
self.select = select is True
# Default to false if select value is not true or false
@stateguard(lambda self: self.select is False)
@condition(['case_false'], ['data'])
def false_action(self, data):
self.select = None
return (data, )
@stateguard(lambda self : self.select is True)
@condition(['case_true'], ['data'])
def true_action(self, data):
self.select = None
return (data, )
action_priority = (false_action, true_action, select_action)
test_set = [
{
'inports': {
'case_false': ['a', 'b', 'c'],
'case_true':['A', 'B', 'C'],
'select': [True, False]*3
},
'data': {'port': ['A', 'a', 'B', 'b', 'C', 'c']},
},
]
| {
"content_hash": "f125a18c31ce1c55935d0568e96f2f20",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 85,
"avg_line_length": 29.036363636363635,
"alnum_prop": 0.5604257983719474,
"repo_name": "EricssonResearch/calvin-base",
"id": "c948e90e0ec2b22505009021cdab943084998159",
"size": "2202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calvin/actorstore/systemactors/flow/Deselect.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "769"
},
{
"name": "Dockerfile",
"bytes": "612"
},
{
"name": "HTML",
"bytes": "24571"
},
{
"name": "JavaScript",
"bytes": "78325"
},
{
"name": "Makefile",
"bytes": "816"
},
{
"name": "Python",
"bytes": "3291484"
},
{
"name": "Shell",
"bytes": "37140"
}
],
"symlink_target": ""
} |
from airflow.utils import import_module_attrs as _import_module_attrs
# These need to be integrated first as other operators depend on them
_import_module_attrs(globals(), {
'check_operator': [
'CheckOperator',
'ValueCheckOperator',
'IntervalCheckOperator',
],
})
_operators = {
'bash_operator': ['BashOperator'],
'python_operator': [
'PythonOperator',
'BranchPythonOperator',
'ShortCircuitOperator',
],
'hive_operator': ['HiveOperator'],
'presto_check_operator': [
'PrestoCheckOperator',
'PrestoValueCheckOperator',
'PrestoIntervalCheckOperator',
],
'dummy_operator': ['DummyOperator'],
'email_operator': ['EmailOperator'],
'hive_to_samba_operator': ['Hive2SambaOperator'],
'mysql_operator': ['MySqlOperator'],
'sqlite_operator': ['SqliteOperator'],
'mysql_to_hive': ['MySqlToHiveTransfer'],
'postgres_operator': ['PostgresOperator'],
'sensors': [
'BaseSensorOperator',
'ExternalTaskSensor',
'HdfsSensor',
'HivePartitionSensor',
'HttpSensor',
'MetastorePartitionSensor',
'S3KeySensor',
'S3PrefixSensor',
'SqlSensor',
'TimeDeltaSensor',
'TimeSensor',
'WebHdfsSensor',
],
'subdag_operator': ['SubDagOperator'],
'hive_stats_operator': ['HiveStatsCollectionOperator'],
's3_to_hive_operator': ['S3ToHiveTransfer'],
'hive_to_mysql': ['HiveToMySqlTransfer'],
's3_file_transform_operator': ['S3FileTransformOperator'],
'http_operator': ['SimpleHttpOperator'],
'hive_to_druid': ['HiveToDruidTransfer'],
'jdbc_operator': ['JdbcOperator'],
'mssql_operator': ['MsSqlOperator'],
'mssql_to_hive': ['MsSqlToHiveTransfer'],
'slack_operator': ['SlackAPIOperator', 'SlackAPIPostOperator'],
'generic_transfer': ['GenericTransfer'],
}
_import_module_attrs(globals(), _operators)
from airflow.models import BaseOperator
def integrate_plugins():
"""Integrate plugins to the context"""
from airflow.plugins_manager import operators as _operators
for _operator in _operators:
globals()[_operator.__name__] = _operator
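# Illustrative sketch only: `_import_module_attrs` lives in `airflow.utils` and is
# not shown here. The hypothetical stand-in below illustrates the behaviour this
# module relies on -- importing each listed submodule and copying the named
# operator classes into the caller's namespace, skipping modules whose optional
# dependencies are not installed. Its name and signature are assumptions, not
# Airflow's actual implementation.
def _example_import_module_attrs(namespace, module_attr_map,
                                 package='airflow.operators'):
    import importlib
    for module_name, attr_names in module_attr_map.items():
        try:
            module = importlib.import_module('%s.%s' % (package, module_name))
        except ImportError:
            # Optional dependencies (e.g. hive, mysql) may be missing; skip them.
            continue
        for attr_name in attr_names:
            namespace[attr_name] = getattr(module, attr_name)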
| {
"content_hash": "d1714bf89f45ae5c4a873c589ee33ce1",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 69,
"avg_line_length": 32.411764705882355,
"alnum_prop": 0.6415607985480943,
"repo_name": "storpipfugl/airflow",
"id": "535abe8cd8e9e94d185f3856575d014b6f0f71c1",
"size": "2310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/operators/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36119"
},
{
"name": "HTML",
"bytes": "95588"
},
{
"name": "JavaScript",
"bytes": "895747"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "571387"
},
{
"name": "Shell",
"bytes": "5301"
}
],
"symlink_target": ""
} |
"""Distributional RL agent using quantile regression.
This loss is computed as in "Distributional Reinforcement Learning with Quantile
Regression" - Dabney et. al, 2017"
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from batch_rl.multi_head import atari_helpers
from dopamine.agents.dqn import dqn_agent
from dopamine.agents.rainbow import rainbow_agent
import gin
import tensorflow.compat.v1 as tf
@gin.configurable
class QuantileAgent(rainbow_agent.RainbowAgent):
"""An extension of Rainbow to perform quantile regression."""
def __init__(self,
sess,
num_actions,
kappa=1.0,
network=atari_helpers.QuantileNetwork,
num_atoms=200,
gamma=0.99,
update_horizon=1,
min_replay_history=50000,
update_period=4,
target_update_period=10000,
epsilon_fn=dqn_agent.linearly_decaying_epsilon,
epsilon_train=0.1,
epsilon_eval=0.05,
epsilon_decay_period=1000000,
replay_scheme='prioritized',
tf_device='/cpu:0',
optimizer=tf.train.AdamOptimizer(
learning_rate=0.00005, epsilon=0.0003125),
summary_writer=None,
summary_writing_frequency=500):
"""Initializes the agent and constructs the Graph.
Args:
sess: A `tf.Session` object for running associated ops.
num_actions: Int, number of actions the agent can take at any state.
kappa: Float, Huber loss cutoff.
network: tf.Keras.Model, expects 3 parameters: num_actions, num_atoms,
network_type. A call to this object will return an instantiation of the
network provided. The network returned can be run with different inputs
to create different outputs. See atari_helpers.QuantileNetwork
as an example.
num_atoms: Int, the number of buckets for the value function distribution.
gamma: Float, exponential decay factor as commonly used in the RL
literature.
update_horizon: Int, horizon at which updates are performed, the 'n' in
n-step update.
min_replay_history: Int, number of stored transitions for training to
start.
update_period: Int, period between DQN updates.
      target_update_period: Int, update period for the target network.
epsilon_fn: Function expecting 4 parameters: (decay_period, step,
warmup_steps, epsilon), and which returns the epsilon value used for
exploration during training.
epsilon_train: Float, final epsilon for training.
epsilon_eval: Float, epsilon during evaluation.
epsilon_decay_period: Int, number of steps for epsilon to decay.
replay_scheme: String, replay memory scheme to be used. Choices are:
uniform - Standard (DQN) replay buffer (Mnih et al., 2015)
prioritized - Prioritized replay buffer (Schaul et al., 2015)
tf_device: Tensorflow device with which the value function is computed
and trained.
optimizer: A `tf.train.Optimizer` object for training the model.
summary_writer: SummaryWriter object for outputting training statistics.
Summary writing disabled if set to None.
summary_writing_frequency: int, frequency with which summaries will be
written. Lower values will result in slower training.
"""
self.kappa = kappa
super(QuantileAgent, self).__init__(
sess=sess,
num_actions=num_actions,
network=network,
num_atoms=num_atoms,
gamma=gamma,
update_horizon=update_horizon,
min_replay_history=min_replay_history,
update_period=update_period,
target_update_period=target_update_period,
epsilon_fn=epsilon_fn,
epsilon_train=epsilon_train,
epsilon_eval=epsilon_eval,
epsilon_decay_period=epsilon_decay_period,
replay_scheme=replay_scheme,
tf_device=tf_device,
optimizer=optimizer,
summary_writer=summary_writer,
summary_writing_frequency=summary_writing_frequency)
def _create_network(self, name):
"""Builds a Quantile ConvNet.
Equivalent to Rainbow ConvNet, only now the output logits are interpreted
as quantiles.
Args:
name: str, this name is passed to the tf.keras.Model and used to create
variable scope under the hood by the tf.keras.Model.
Returns:
network: tf.keras.Model, the network instantiated by the Keras model.
"""
network = self.network(self.num_actions, self._num_atoms, name=name)
return network
def _build_target_distribution(self):
batch_size = tf.shape(self._replay.rewards)[0]
# size of rewards: batch_size x 1
rewards = self._replay.rewards[:, None]
# size of tiled_support: batch_size x num_atoms
is_terminal_multiplier = 1. - tf.cast(self._replay.terminals, tf.float32)
# Incorporate terminal state to discount factor.
# size of gamma_with_terminal: batch_size x 1
gamma_with_terminal = self.cumulative_gamma * is_terminal_multiplier
gamma_with_terminal = gamma_with_terminal[:, None]
# size of next_qt_argmax: 1 x batch_size
next_qt_argmax = tf.argmax(
self._replay_next_target_net_outputs.q_values, axis=1)[:, None]
batch_indices = tf.range(tf.to_int64(batch_size))[:, None]
# size of next_qt_argmax: batch_size x 2
batch_indexed_next_qt_argmax = tf.concat(
[batch_indices, next_qt_argmax], axis=1)
# size of next_logits (next quantiles): batch_size x num_atoms
next_logits = tf.gather_nd(
self._replay_next_target_net_outputs.logits,
batch_indexed_next_qt_argmax)
return rewards + gamma_with_terminal * next_logits
def _build_train_op(self):
"""Builds a training op.
Returns:
train_op: An op performing one step of training.
"""
target_distribution = tf.stop_gradient(self._build_target_distribution())
# size of indices: batch_size x 1.
indices = tf.range(tf.shape(self._replay_net_outputs.logits)[0])[:, None]
# size of reshaped_actions: batch_size x 2.
reshaped_actions = tf.concat([indices, self._replay.actions[:, None]], 1)
# For each element of the batch, fetch the logits for its selected action.
chosen_action_logits = tf.gather_nd(self._replay_net_outputs.logits,
reshaped_actions)
bellman_errors = (target_distribution[:, None, :] -
chosen_action_logits[:, :, None]) # Input `u' of Eq. 9.
huber_loss = ( # Eq. 9 of paper.
tf.to_float(tf.abs(bellman_errors) <= self.kappa) *
0.5 * bellman_errors ** 2 +
tf.to_float(tf.abs(bellman_errors) > self.kappa) *
self.kappa * (tf.abs(bellman_errors) - 0.5 * self.kappa))
tau_hat = ((tf.range(self._num_atoms, dtype=tf.float32) + 0.5) /
self._num_atoms) # Quantile midpoints. See Lemma 2 of paper.
quantile_huber_loss = ( # Eq. 10 of paper.
tf.abs(tau_hat[None, :, None] - tf.to_float(bellman_errors < 0)) *
huber_loss)
# Sum over tau dimension, average over target value dimension.
loss = tf.reduce_sum(tf.reduce_mean(quantile_huber_loss, 2), 1)
if self._replay_scheme == 'prioritized':
target_priorities = self._replay.tf_get_priority(self._replay.indices)
# The original prioritized experience replay uses a linear exponent
# schedule 0.4 -> 1.0. Comparing the schedule to a fixed exponent of 0.5
# on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders) suggested
# a fixed exponent actually performs better, except on Pong.
loss_weights = 1.0 / tf.sqrt(target_priorities + 1e-10)
loss_weights /= tf.reduce_max(loss_weights)
# Rainbow and prioritized replay are parametrized by an exponent alpha,
# but in both cases it is set to 0.5 - for simplicity's sake we leave it
# as is here, using the more direct tf.sqrt(). Taking the square root
# "makes sense", as we are dealing with a squared loss.
# Add a small nonzero value to the loss to avoid 0 priority items. While
# technically this may be okay, setting all items to 0 priority will cause
# troubles, and also result in 1.0 / 0.0 = NaN correction terms.
update_priorities_op = self._replay.tf_set_priority(
self._replay.indices, tf.sqrt(loss + 1e-10))
# Weight loss by inverse priorities.
loss = loss_weights * loss
else:
update_priorities_op = tf.no_op()
with tf.control_dependencies([update_priorities_op]):
if self.summary_writer is not None:
with tf.variable_scope('Losses'):
tf.summary.scalar('QuantileLoss', tf.reduce_mean(loss))
return self.optimizer.minimize(tf.reduce_mean(loss)), loss
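# Standalone illustration (not used by the agent): a NumPy re-statement of the
# quantile Huber loss assembled in _build_train_op above, i.e. Eqs. 9-10 of the
# QR-DQN paper. Shapes and names mirror the TF code; kappa and the inputs are
# arbitrary example values supplied by the caller.
def _quantile_huber_loss_numpy(target_distribution, chosen_action_logits,
                               kappa=1.0):
  import numpy as np
  # bellman_errors[b, j, i] = target_i - predicted_quantile_j (input `u' of Eq. 9).
  bellman_errors = (target_distribution[:, None, :] -
                    chosen_action_logits[:, :, None])
  abs_errors = np.abs(bellman_errors)
  huber_loss = np.where(abs_errors <= kappa,  # Eq. 9.
                        0.5 * bellman_errors ** 2,
                        kappa * (abs_errors - 0.5 * kappa))
  num_atoms = chosen_action_logits.shape[1]
  tau_hat = (np.arange(num_atoms) + 0.5) / num_atoms  # Quantile midpoints.
  quantile_huber_loss = (  # Eq. 10.
      np.abs(tau_hat[None, :, None] - (bellman_errors < 0).astype(np.float64)) *
      huber_loss)
  # Sum over the quantile dimension, average over the target-sample dimension.
  return np.sum(np.mean(quantile_huber_loss, axis=2), axis=1)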
| {
"content_hash": "25a912335492550f5f66e0276505789e",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 80,
"avg_line_length": 43.49756097560976,
"alnum_prop": 0.6585174386004261,
"repo_name": "google-research/batch_rl",
"id": "df0a3d2a6ae9eeae254f5c415c3a4770a895c967",
"size": "9525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "batch_rl/multi_head/quantile_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "105413"
}
],
"symlink_target": ""
} |
"""Entity, follower and following documents"""
__all__ = ['Entity']
from warnings import warn
from mongoengine import *
from mongoengine.queryset import DoesNotExist
from mongoengine.signals import post_save
from tentd.documents import *
from tentd.utils import json_attributes, set_attributes
class QuerySetProperty(object):
"""A set of documents belonging to an entity from another collection
Basically, provides the functionality a backref would provide."""
def __init__(self, cls):
self.cls = cls
def __get__(self, instance, owner):
return self.cls.objects(entity=instance)
class Entity(db.Document):
"""A tent entity"""
meta = {
'allow_inheritance': False,
'indexes': ['name'],
}
#: The name used as the entities api root
name = StringField(max_length=100, required=True, unique=True)
# Querysets belonging to the Entity
profiles = QuerySetProperty(Profile)
posts = QuerySetProperty(Post)
followers = QuerySetProperty(Follower)
followings = QuerySetProperty(Following)
notifications = QuerySetProperty(Notification)
groups = QuerySetProperty(Group)
@property
def core(self):
"""Fetch the core profile for the entity"""
try:
            return Profile.objects.get(entity=self, schema=CoreProfile.__schema__)
except DoesNotExist:
raise Exception("Entity has no core profile.")
@classmethod
def new(cls, **kwargs):
"""Constucts a Post and an initial version from the same args"""
entity = cls()
set_attributes(Entity, entity, kwargs)
set_attributes(CoreProfile, entity.core, kwargs)
return entity
def create_core(self, **kwargs):
"""Creates a coreprofile instance attached to this entity"""
warn(DeprecationWarning(
"create_core() has been replaced by Entity.new()"))
return CoreProfile(entity=self, **kwargs).save()
def __repr__(self):
return "<Entity '{}' [{}]>".format(self.name, self.id)
def __str__(self):
"""Returns self.name
Avoid changing this behaviour, as it allows the entity to be used in
url_for calls without explicitly stating that the name is being used
"""
return self.name
def to_json(self):
return json_attributes(self,
'name',
'profiles',
'followers',
'followings',
'notifications')
@staticmethod
def post_save(sender, document, **kwargs):
"""Signal function to automatically create a core profile"""
try:
CoreProfile.objects.get(entity=document)
except CoreProfile.DoesNotExist:
CoreProfile(entity=document).save()
post_save.connect(Entity.post_save, sender=Entity)
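# Illustrative usage sketch (not part of the module): it assumes a configured
# mongoengine connection and shows how the pieces above fit together. Saving an
# Entity fires the post_save handler, which creates its CoreProfile, and the
# QuerySetProperty attributes expose the related collections as querysets.
def _example_usage():
    entity = Entity(name='example').save()
    core = entity.core  # created automatically by Entity.post_save
    profiles = entity.profiles  # queryset filtered on this entity
    followers = entity.followers  # likewise for followers
    return core, list(profiles), list(followers)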
| {
"content_hash": "3ad23ef6225672dec1768f23b6977054",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 76,
"avg_line_length": 30.204301075268816,
"alnum_prop": 0.642933428266287,
"repo_name": "pytent/pytentd",
"id": "7ae2a18b290669874dae46474237cd59ee9c3b93",
"size": "2809",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tentd/documents/entity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1261"
},
{
"name": "Python",
"bytes": "104415"
}
],
"symlink_target": ""
} |
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from weather.models import Weather
from weather.serializers import WeatherSerializer
import datetime
class WeatherReporter(APIView):
    @staticmethod
    def reporter(params):
if params["Date"] == ["00-00"]:
message = "we can't find Date value"
return params, message
try:
params["City"]
except KeyError:
message = "we can't find City value"
return params, message
date = params["Date"][0]
city = params["City"][0]
if date == "오늘":
data = WeatherReporter.WeatherToday(city)
elif date == "내일":
data = WeatherReporter.WeatherTomorrow(city)
message = ":D"
return data, message
    @staticmethod
    def WeatherToday(city):
date = datetime.date.today()
obj = Weather.objects.filter(city_name__icontains=city).filter(current_date=date)
data = WeatherSerializer(obj, many=True)
return data.data
    @staticmethod
    def WeatherTomorrow(city):
date = datetime.date.today() + datetime.timedelta(1)
obj = Weather.objects.filter(city_name__icontains=city).filter(current_date=date)
data = WeatherSerializer(obj, many=True)
return data.data
| {
"content_hash": "bba1252c72db7f29a031590731c34bb2",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 89,
"avg_line_length": 34.525,
"alnum_prop": 0.6357711803041275,
"repo_name": "TeamEmily/Emily_server",
"id": "2b39289130c9ec2c14c2478db3960a695eaa0246",
"size": "1389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weather/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "21964"
},
{
"name": "Python",
"bytes": "67402"
},
{
"name": "Shell",
"bytes": "61"
}
],
"symlink_target": ""
} |
from google.cloud import logging_v2
async def sample_delete_bucket():
# Create a client
client = logging_v2.ConfigServiceV2AsyncClient()
# Initialize request argument(s)
request = logging_v2.DeleteBucketRequest(
name="name_value",
)
# Make the request
await client.delete_bucket(request=request)
# [END logging_v2_generated_ConfigServiceV2_DeleteBucket_async]
| {
"content_hash": "3e5cfd8f8d152e4dacc25ced8eb7f620",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 63,
"avg_line_length": 23.647058823529413,
"alnum_prop": 0.7164179104477612,
"repo_name": "googleapis/gapic-generator-python",
"id": "6633aa3c4344afe75d7889133f129a062bf4b4be",
"size": "1788",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/integration/goldens/logging/samples/generated_samples/logging_v2_generated_config_service_v2_delete_bucket_async.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2702"
},
{
"name": "Jinja",
"bytes": "767902"
},
{
"name": "Python",
"bytes": "4802905"
},
{
"name": "Shell",
"bytes": "31013"
},
{
"name": "Starlark",
"bytes": "26281"
}
],
"symlink_target": ""
} |
"""Tests for compiler_opt.tools.extract_ir."""
# pylint: disable=protected-access
import os.path
from absl import flags
from absl.testing import absltest
from compiler_opt.tools import extract_ir
flags.FLAGS['num_workers'].allow_override = True
class ExtractIrTest(absltest.TestCase):
def test_one_conversion(self):
obj = extract_ir.convert_compile_command_to_objectfile(
{
'directory': '/output/directory',
'command': '-cc1 -c /some/path/lib/foo/bar.cc -o lib/bar.o',
'file': '/some/path/lib/foo/bar.cc'
}, '/corpus/destination/path')
self.assertIsNotNone(obj)
# pytype: disable=attribute-error
# Pytype complains about obj being None
self.assertEqual(obj.input_obj(), '/output/directory/lib/bar.o')
self.assertEqual(obj.relative_output_path(), 'lib/bar.o')
self.assertEqual(obj.cmd_file(), '/corpus/destination/path/lib/bar.o.cmd')
self.assertEqual(obj.bc_file(), '/corpus/destination/path/lib/bar.o.bc')
self.assertEqual(obj.thinlto_index_file(),
'/corpus/destination/path/lib/bar.o.thinlto.bc')
# pytype: enable=attribute-error
def test_arr_conversion(self):
res = extract_ir.load_from_compile_commands([{
'directory': '/output/directory',
'command': '-cc1 -c /some/path/lib/foo/bar.cc -o lib/bar.o',
'file': '/some/path/lib/foo/bar.cc'
}, {
'directory': '/output/directory',
'command': '-cc1 -c /some/path/lib/foo/baz.cc -o lib/other/baz.o',
'file': '/some/path/lib/foo/baz.cc'
}], '/corpus/destination/path')
res = list(res)
self.assertLen(res, 2)
self.assertEqual(res[0].input_obj(), '/output/directory/lib/bar.o')
self.assertEqual(res[0].relative_output_path(), 'lib/bar.o')
self.assertEqual(res[0].cmd_file(),
'/corpus/destination/path/lib/bar.o.cmd')
self.assertEqual(res[0].bc_file(), '/corpus/destination/path/lib/bar.o.bc')
self.assertEqual(res[0].thinlto_index_file(),
'/corpus/destination/path/lib/bar.o.thinlto.bc')
self.assertEqual(res[1].input_obj(), '/output/directory/lib/other/baz.o')
self.assertEqual(res[1].relative_output_path(), 'lib/other/baz.o')
self.assertEqual(res[1].cmd_file(),
'/corpus/destination/path/lib/other/baz.o.cmd')
self.assertEqual(res[1].bc_file(),
'/corpus/destination/path/lib/other/baz.o.bc')
self.assertEqual(res[1].thinlto_index_file(),
'/corpus/destination/path/lib/other/baz.o.thinlto.bc')
def test_command_extraction(self):
obj = extract_ir.TrainingIRExtractor(
obj_relative_path='lib/obj_file.o',
output_base_dir='/where/corpus/goes',
obj_base_dir='/foo/bar')
self.assertEqual(
obj._get_extraction_cmd_command('/bin/llvm_objcopy_path'), [
'/bin/llvm_objcopy_path',
'--dump-section=.llvmcmd=/where/corpus/goes/lib/obj_file.o.cmd',
'/foo/bar/lib/obj_file.o', '/dev/null'
])
self.assertEqual(
obj._get_extraction_bc_command('/bin/llvm_objcopy_path'), [
'/bin/llvm_objcopy_path',
'--dump-section=.llvmbc=/where/corpus/goes/lib/obj_file.o.bc',
'/foo/bar/lib/obj_file.o', '/dev/null'
])
def test_command_extraction_no_basedir(self):
obj = extract_ir.TrainingIRExtractor('lib/obj_file.o', '/where/corpus/goes')
self.assertEqual(
obj._get_extraction_cmd_command('/bin/llvm_objcopy_path'), [
'/bin/llvm_objcopy_path',
'--dump-section=.llvmcmd=/where/corpus/goes/lib/obj_file.o.cmd',
'lib/obj_file.o', '/dev/null'
])
self.assertEqual(
obj._get_extraction_bc_command('/bin/llvm_objcopy_path'), [
'/bin/llvm_objcopy_path',
'--dump-section=.llvmbc=/where/corpus/goes/lib/obj_file.o.bc',
'lib/obj_file.o', '/dev/null'
])
def test_lld_params(self):
lld_opts = [
'-o', 'output/dir/exe', 'lib/obj1.o', 'somelib.a', '-W,blah',
'lib/dir/obj2.o'
]
obj = extract_ir.load_from_lld_params(lld_opts, '/some/path', '/tmp/out')
self.assertLen(obj, 2)
self.assertEqual(obj[0].input_obj(), '/some/path/lib/obj1.o')
self.assertEqual(obj[0].relative_output_path(), 'lib/obj1.o')
self.assertEqual(obj[0].cmd_file(), '/tmp/out/lib/obj1.o.cmd')
self.assertEqual(obj[0].thinlto_index_file(),
'/tmp/out/lib/obj1.o.thinlto.bc')
self.assertEqual(obj[1].input_obj(), '/some/path/lib/dir/obj2.o')
def test_lld_thinlto_discovery(self):
tempdir = self.create_tempdir()
tempdir.create_file(file_path='1.3.import.bc')
tempdir.create_file(file_path='2.3.import.bc')
tempdir.create_file(file_path='3.3.import.bc')
tempdir.create_file(file_path='1.thinlto.bc')
tempdir.create_file(file_path='2.thinlto.bc')
tempdir.create_file(file_path='3.thinlto.bc')
outdir = self.create_tempdir()
obj = extract_ir.load_for_lld_thinlto(tempdir.full_path, outdir.full_path)
self.assertLen(obj, 3)
for i, o in enumerate(sorted(obj, key=lambda x: x._obj_relative_path)):
self.assertEqual(o._obj_relative_path, f'{i + 1:d}')
self.assertEqual(o._obj_base_dir, tempdir.full_path)
self.assertEqual(o._output_base_dir, outdir.full_path)
def test_lld_thinlto_discovery_nested(self):
outer = self.create_tempdir()
tempdir = outer.mkdir(dir_path='nest')
tempdir.create_file(file_path='1.3.import.bc')
tempdir.create_file(file_path='2.3.import.bc')
tempdir.create_file(file_path='3.3.import.bc')
tempdir.create_file(file_path='1.thinlto.bc')
tempdir.create_file(file_path='2.thinlto.bc')
tempdir.create_file(file_path='3.thinlto.bc')
outdir = self.create_tempdir()
obj = extract_ir.load_for_lld_thinlto(outer.full_path, outdir.full_path)
self.assertLen(obj, 3)
for i, o in enumerate(sorted(obj, key=lambda x: x._obj_relative_path)):
self.assertEqual(o._obj_relative_path, f'nest/{i + 1:d}')
self.assertEqual(o._obj_base_dir, outer.full_path)
self.assertEqual(o._output_base_dir, outdir.full_path)
def test_lld_thinlto_extraction(self):
outer = self.create_tempdir()
tempdir = outer.mkdir(dir_path='nest')
tempdir.create_file(file_path='1.3.import.bc')
tempdir.create_file(file_path='2.3.import.bc')
tempdir.create_file(file_path='3.3.import.bc')
tempdir.create_file(file_path='1.thinlto.bc')
tempdir.create_file(file_path='2.thinlto.bc')
tempdir.create_file(file_path='3.thinlto.bc')
outdir = self.create_tempdir()
obj = extract_ir.load_for_lld_thinlto(outer.full_path, outdir.full_path)
for i, o in enumerate(sorted(obj, key=lambda x: x._obj_relative_path)):
mod_path = o.extract(thinlto_build='local')
self.assertEqual(mod_path, f'nest/{i + 1:d}')
self.assertTrue(os.path.exists(os.path.join(outdir.full_path, 'nest/1.bc')))
self.assertTrue(os.path.exists(os.path.join(outdir.full_path, 'nest/2.bc')))
self.assertTrue(os.path.exists(os.path.join(outdir.full_path, 'nest/3.bc')))
self.assertTrue(
os.path.exists(os.path.join(outdir.full_path, 'nest/1.thinlto.bc')))
self.assertTrue(
os.path.exists(os.path.join(outdir.full_path, 'nest/2.thinlto.bc')))
self.assertTrue(
os.path.exists(os.path.join(outdir.full_path, 'nest/3.thinlto.bc')))
def test_filtering(self):
cmdline = '-cc1\0x/y/foobar.cpp\0-Oz\0-Ifoo\0-o\0bin/out.o'
self.assertTrue(extract_ir.should_include_module(cmdline, None))
self.assertTrue(extract_ir.should_include_module(cmdline, '.*'))
self.assertTrue(extract_ir.should_include_module(cmdline, '^-Oz$'))
self.assertFalse(extract_ir.should_include_module(cmdline, '^-O3$'))
def test_thinlto_index_extractor(self):
cmdline = ('-cc1\0x/y/foobar.cpp\0-Oz\0-Ifoo\0-o\0bin/'
'out.o\0-fthinlto-index=foo/bar.thinlto.bc')
self.assertEqual(
extract_ir.get_thinlto_index(cmdline, '/the/base/dir'),
'/the/base/dir/foo/bar.thinlto.bc')
if __name__ == '__main__':
absltest.main()
| {
"content_hash": "c3c510284d1d18eee9dddbe3ccb34cda",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 80,
"avg_line_length": 44.55191256830601,
"alnum_prop": 0.6407457377652398,
"repo_name": "google/ml-compiler-opt",
"id": "95c70be98ec939a29a0bd68340273ea2c2b33af1",
"size": "8743",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "compiler_opt/tools/extract_ir_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "358790"
},
{
"name": "Shell",
"bytes": "14456"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Example:
# (r'^app/', include('app.foo.urls')),
# Uncomment the next line to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line for to enable the admin:
# (r'^admin/(.*)', admin.site.root),
)
| {
"content_hash": "4853abcc475bd77bca1cced83f3760b1",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 65,
"avg_line_length": 30,
"alnum_prop": 0.6458333333333334,
"repo_name": "miracle2k/feedplatform",
"id": "4115354894f6fc143f922e2443a2ef55c5b52379",
"size": "480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/django/aggregator/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "463892"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.conf import settings
import pika
import logging
import ujson
import random
import time
import threading
import atexit
from collections import defaultdict
from zerver.lib.utils import statsd
from typing import Any, Callable, Dict, List, Set
# This simple queuing library doesn't expose much of the power of
# rabbitmq/pika's queuing system; its purpose is to just provide an
# interface for external files to put things into queues and take them
# out from bots without having to import pika code all over our codebase.
class SimpleQueueClient(object):
def __init__(self):
self.log = logging.getLogger('zulip.queue')
self.queues = set() # type: Set[str]
self.channel = None # type: Any
self.consumers = defaultdict(set) # type: Dict[str, Set[Any]]
# Disable RabbitMQ heartbeats since BlockingConnection can't process them
self.rabbitmq_heartbeat = 0
self._connect()
def _connect(self):
start = time.time()
self.connection = pika.BlockingConnection(self._get_parameters())
self.channel = self.connection.channel()
self.log.info('SimpleQueueClient connected (connecting took %.3fs)' % (time.time() - start,))
def _reconnect(self):
self.connection = None
self.channel = None
self.queues = set()
self._connect()
def _get_parameters(self):
# We explicitly disable the RabbitMQ heartbeat feature, since
# it doesn't make sense with BlockingConnection
credentials = pika.PlainCredentials(settings.RABBITMQ_USERNAME,
settings.RABBITMQ_PASSWORD)
return pika.ConnectionParameters(settings.RABBITMQ_HOST,
heartbeat_interval=self.rabbitmq_heartbeat,
credentials=credentials)
def _generate_ctag(self, queue_name):
return "%s_%s" % (queue_name, str(random.getrandbits(16)))
def _reconnect_consumer_callback(self, queue, consumer):
self.log.info("Queue reconnecting saved consumer %s to queue %s" % (consumer, queue))
self.ensure_queue(queue, lambda: self.channel.basic_consume(consumer,
queue=queue,
consumer_tag=self._generate_ctag(queue)))
def _reconnect_consumer_callbacks(self):
for queue, consumers in self.consumers.items():
for consumer in consumers:
self._reconnect_consumer_callback(queue, consumer)
def close(self):
if self.connection:
self.connection.close()
def ready(self):
return self.channel is not None
def ensure_queue(self, queue_name, callback):
'''Ensure that a given queue has been declared, and then call
the callback with no arguments.'''
if not self.connection.is_open:
self._connect()
if queue_name not in self.queues:
self.channel.queue_declare(queue=queue_name, durable=True)
self.queues.add(queue_name)
callback()
def publish(self, queue_name, body):
def do_publish():
self.channel.basic_publish(
exchange='',
routing_key=queue_name,
properties=pika.BasicProperties(delivery_mode=2),
body=body)
statsd.incr("rabbitmq.publish.%s" % (queue_name,))
self.ensure_queue(queue_name, do_publish)
def json_publish(self, queue_name, body):
try:
return self.publish(queue_name, ujson.dumps(body))
except (AttributeError, pika.exceptions.AMQPConnectionError):
self.log.warning("Failed to send to rabbitmq, trying to reconnect and send again")
self._reconnect()
return self.publish(queue_name, ujson.dumps(body))
def register_consumer(self, queue_name, consumer):
def wrapped_consumer(ch, method, properties, body):
try:
consumer(ch, method, properties, body)
ch.basic_ack(delivery_tag=method.delivery_tag)
except Exception as e:
ch.basic_nack(delivery_tag=method.delivery_tag)
raise e
self.consumers[queue_name].add(wrapped_consumer)
self.ensure_queue(queue_name,
lambda: self.channel.basic_consume(wrapped_consumer, queue=queue_name,
consumer_tag=self._generate_ctag(queue_name)))
def register_json_consumer(self, queue_name, callback):
def wrapped_callback(ch, method, properties, body):
return callback(ujson.loads(body))
return self.register_consumer(queue_name, wrapped_callback)
def drain_queue(self, queue_name, json=False):
"Returns all messages in the desired queue"
messages = []
def opened():
while True:
(meta, _, message) = self.channel.basic_get(queue_name)
if not message:
                    break
self.channel.basic_ack(meta.delivery_tag)
if json:
message = ujson.loads(message)
messages.append(message)
self.ensure_queue(queue_name, opened)
return messages
def start_consuming(self):
self.channel.start_consuming()
def stop_consuming(self):
self.channel.stop_consuming()
# Patch pika.adapters.TornadoConnection so that a socket error doesn't
# throw an exception and disconnect the tornado process from the rabbitmq
# queue. Instead, just re-connect as usual
class ExceptionFreeTornadoConnection(pika.adapters.TornadoConnection):
def _adapter_disconnect(self):
try:
super(ExceptionFreeTornadoConnection, self)._adapter_disconnect()
except (pika.exceptions.ProbableAuthenticationError,
pika.exceptions.ProbableAccessDeniedError,
pika.exceptions.IncompatibleProtocolError) as e:
logging.warning("Caught exception '%r' in ExceptionFreeTornadoConnection when \
calling _adapter_disconnect, ignoring" % (e,))
class TornadoQueueClient(SimpleQueueClient):
# Based on:
# https://pika.readthedocs.io/en/0.9.8/examples/asynchronous_consumer_example.html
def __init__(self):
super(TornadoQueueClient, self).__init__()
        # Enable rabbitmq heartbeat since TornadoConnection can process them
self.rabbitmq_heartbeat = None
self._on_open_cbs = [] # type: List[Callable[[], None]]
def _connect(self, on_open_cb = None):
self.log.info("Beginning TornadoQueueClient connection")
if on_open_cb:
self._on_open_cbs.append(on_open_cb)
self.connection = ExceptionFreeTornadoConnection(
self._get_parameters(),
on_open_callback = self._on_open,
stop_ioloop_on_close = False)
self.connection.add_on_close_callback(self._on_connection_closed)
def _reconnect(self):
self.connection = None
self.channel = None
self.queues = set()
self._connect()
def _on_open(self, connection):
self.connection.channel(
on_open_callback = self._on_channel_open)
def _on_channel_open(self, channel):
self.channel = channel
for callback in self._on_open_cbs:
callback()
self._reconnect_consumer_callbacks()
self.log.info('TornadoQueueClient connected')
def _on_connection_closed(self, connection, reply_code, reply_text):
self.log.warning("TornadoQueueClient lost connection to RabbitMQ, reconnecting...")
from tornado import ioloop
# Try to reconnect in two seconds
retry_seconds = 2
def on_timeout():
try:
self._reconnect()
except pika.exceptions.AMQPConnectionError:
self.log.critical("Failed to reconnect to RabbitMQ, retrying...")
ioloop.IOLoop.instance().add_timeout(time.time() + retry_seconds, on_timeout)
ioloop.IOLoop.instance().add_timeout(time.time() + retry_seconds, on_timeout)
def ensure_queue(self, queue_name, callback):
def finish(frame):
self.queues.add(queue_name)
callback()
if queue_name not in self.queues:
# If we're not connected yet, send this message
# once we have created the channel
if not self.ready():
self._on_open_cbs.append(lambda: self.ensure_queue(queue_name, callback))
return
self.channel.queue_declare(queue=queue_name, durable=True, callback=finish)
else:
callback()
def register_consumer(self, queue_name, consumer):
def wrapped_consumer(ch, method, properties, body):
consumer(ch, method, properties, body)
ch.basic_ack(delivery_tag=method.delivery_tag)
if not self.ready():
self.consumers[queue_name].add(wrapped_consumer)
return
self.consumers[queue_name].add(wrapped_consumer)
self.ensure_queue(queue_name,
lambda: self.channel.basic_consume(wrapped_consumer, queue=queue_name,
consumer_tag=self._generate_ctag(queue_name)))
queue_client = None # type: SimpleQueueClient
def get_queue_client():
global queue_client
if queue_client is None:
if settings.RUNNING_INSIDE_TORNADO and settings.USING_RABBITMQ:
queue_client = TornadoQueueClient()
elif settings.USING_RABBITMQ:
queue_client = SimpleQueueClient()
return queue_client
def setup_tornado_rabbitmq():
# When tornado is shut down, disconnect cleanly from rabbitmq
if settings.USING_RABBITMQ:
atexit.register(lambda: queue_client.close())
# We use a simple lock to prevent multiple RabbitMQ messages being
# sent to the SimpleQueueClient at the same time; this is a workaround
# for an issue with the pika BlockingConnection where using
# BlockingConnection for multiple queues causes the channel to
# randomly close.
queue_lock = threading.RLock()
def queue_json_publish(queue_name, event, processor):
with queue_lock:
if settings.USING_RABBITMQ:
get_queue_client().json_publish(queue_name, event)
else:
processor(event)
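# Illustrative usage sketch (not part of this module): queue_json_publish is the
# entry point other code is expected to use. With RabbitMQ enabled the event is
# published to the named queue; otherwise the supplied processor handles it
# inline. The queue name and event below are made up for the example.
def _example_publish():
    def process_locally(event):
        logging.info("processing %r without RabbitMQ" % (event,))
    queue_json_publish("example_events", {"type": "ping"}, process_locally)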
| {
"content_hash": "9c9fd169bfe3dc214bf74565e75e43a1",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 109,
"avg_line_length": 38.68265682656826,
"alnum_prop": 0.627778307736335,
"repo_name": "Frouk/zulip",
"id": "c08d8dec73e7f4f70a00e79ab83792aac3168615",
"size": "10483",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zerver/lib/queue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "183514"
},
{
"name": "CoffeeScript",
"bytes": "18435"
},
{
"name": "Groovy",
"bytes": "5516"
},
{
"name": "HTML",
"bytes": "395036"
},
{
"name": "JavaScript",
"bytes": "1582587"
},
{
"name": "Nginx",
"bytes": "1228"
},
{
"name": "PHP",
"bytes": "18930"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "383634"
},
{
"name": "Puppet",
"bytes": "96085"
},
{
"name": "Python",
"bytes": "1984569"
},
{
"name": "Ruby",
"bytes": "255867"
},
{
"name": "Shell",
"bytes": "33353"
}
],
"symlink_target": ""
} |
from common.methods import set_progress
import os
import json, tempfile
from google.cloud import storage
from resourcehandlers.gce.models import GCEHandler
from django.conf import settings
from pathlib import Path
def create_client(rh):
json_fd, json_path = tempfile.mkstemp()
json_dict = {'client_email': rh.serviceaccount,
'token_uri': 'https://www.googleapis.com/oauth2/v4/token',
'private_key': rh.servicepasswd
}
with open(json_path, 'w') as fh:
json.dump(json_dict, fh)
client = storage.client.Client.from_service_account_json(json_path,
project=rh.project)
os.close(json_fd)
return client
def generate_options_for_file_name(control_value=None, **kwargs):
names = []
if control_value:
path = os.path.expanduser(control_value)
names.extend([x for x in os.listdir(path)])
return names
def generate_options_for_make_blob_public(**kwargs):
return [True, False]
def run(job, *args, **kwargs):
resource = kwargs.get('resource')
resource_handler = GCEHandler.objects.get(id=resource.google_rh_id)
file = "{{ file }}"
file_name = Path(file).name
make_blob_public = "{{ make_blob_public }}"
client = create_client(resource_handler)
bucket = client.get_bucket(resource.bucket_name)
set_progress(bucket)
if bucket:
blob = bucket.blob(file_name)
        res = blob.upload_from_filename(file)
if not res:
if make_blob_public:
blob.make_public()
return f"SUCCESS", f"`{file_name}` Uploaded successfully", ""
else:
return "FAILURE", "Bucket does not exist", f"Bucket `{resource.bucket_name}` does not exist"
| {
"content_hash": "a8a913cd28226369e07f1b05247dccb9",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 97,
"avg_line_length": 29.52542372881356,
"alnum_prop": 0.6435132032146957,
"repo_name": "CloudBoltSoftware/cloudbolt-forge",
"id": "698b649d14a966f4022b51532e8366f688b840ee",
"size": "1742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blueprints/google_storage/management/upload_blob.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1665"
},
{
"name": "HTML",
"bytes": "165828"
},
{
"name": "JavaScript",
"bytes": "1871"
},
{
"name": "PowerShell",
"bytes": "5779"
},
{
"name": "Python",
"bytes": "1742154"
},
{
"name": "Shell",
"bytes": "16836"
}
],
"symlink_target": ""
} |
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class SignatureEnvelopeResult:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'envelope': 'SignatureEnvelopeInfo'
}
self.envelope = None # SignatureEnvelopeInfo
| {
"content_hash": "79f1b8773a453c9849dcd4f5d5a84efd",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 77,
"avg_line_length": 30.161290322580644,
"alnum_prop": 0.6887700534759358,
"repo_name": "liosha2007/temporary-groupdocs-python3-sdk",
"id": "2e0c59e6bfc51767e56ab31e57989d8bb1897a67",
"size": "957",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "groupdocs/models/SignatureEnvelopeResult.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "992590"
}
],
"symlink_target": ""
} |
"""Module only used to log the number of followers to a file"""
from datetime import datetime
from selenium.common.exceptions import NoSuchElementException
def log_follower_num(browser, username):
"""Prints and logs the current number of followers to
    a separate file"""
browser.get('https://www.instagram.com/' + username)
followed_by = browser.execute_script("return window._sharedData.entry_data.ProfilePage[0].user.followed_by.count")
with open('./logs/followerNum.txt', 'a') as numFile:
numFile.write('{:%Y-%m-%d %H:%M} {}\n'.format(datetime.now(), followed_by or 0))
def log_followed_pool(login, followed):
"""Prints and logs the followed to
a seperate file"""
try:
with open('./logs/' + login + '_followedPool.csv', 'a') as followPool:
followPool.write(followed + ",\n")
except BaseException as e:
print("log_followed_pool error \n", str(e))
| {
"content_hash": "7c05913f0125e4483349035ba671cf12",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 118,
"avg_line_length": 38.583333333333336,
"alnum_prop": 0.6727861771058316,
"repo_name": "juanchoabdon/insta-data-science",
"id": "a0108ebb6dc0deaa40009add33375673714cee55",
"size": "926",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "instapy/print_log_writer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "1267"
},
{
"name": "Python",
"bytes": "121188"
},
{
"name": "Shell",
"bytes": "2055"
}
],
"symlink_target": ""
} |
"""
Django settings for GTFSBuilder project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z2n&w9cgzkf_coxk989q%6ltzy^9gbo)lpt39@f0-n)$52ct8%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'gtfs',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'GTFSBuilder.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
"django.core.context_processors.request",
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'GTFSBuilder.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
GRAPPELLI_ADMIN_TITLE = 'GTFS Builder'
GRAPPELLI_AUTOCOMPLETE_LIMIT = 15
GRAPPELLI_SWITCH_USER = True
# FIXTURE_DIRS = (
# os.path.join(BASE_DIR, 'gtfs', 'fixtures')
# )
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'files', 'static')
| {
"content_hash": "230508a3ebffc5381b97da9dcfcfe167",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 71,
"avg_line_length": 27.172727272727272,
"alnum_prop": 0.6901973904315825,
"repo_name": "MarconiMediaGroup/GTFS-Builder",
"id": "2c333e5550547fc501d28267da0f42348ee77de3",
"size": "3031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GTFSBuilder/settings.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "45449"
}
],
"symlink_target": ""
} |
from searchengine.index.index import Index
from searchengine.parser import RequestDocument
def binary_independance_retrieval(request, index, nb_answers):
"""
Parameters: request, list of documents, common words and number of answers required
    Result: a sorted list of (document id, RSV score) pairs for the top k documents, ranked by relevance
"""
request_doc = RequestDocument(request)
request_index = Index(index.common_words, [request_doc])
out = []
for doc_id in index.doc_ids:
rsv = index.probability_rsv(request_index, doc_id)
if rsv is not None:
out.append((doc_id, rsv))
return sorted(out, key=lambda x: -x[1])[:nb_answers]
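# Illustrative usage sketch (not part of the module). It assumes an Index has
# already been built elsewhere from the parsed document collection and the list
# of common words; the request string and the value of k below are arbitrary
# example values.
def _example_query(index):
    top_docs = binary_independance_retrieval("information retrieval", index, 5)
    for doc_id, rsv in top_docs:
        print(doc_id, rsv)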
| {
"content_hash": "c07a069193a7dce4a0fce293aa6f7f29",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 87,
"avg_line_length": 38.705882352941174,
"alnum_prop": 0.6899696048632219,
"repo_name": "Neki/searchengine",
"id": "059cb8edf18e845b800e2bbabbf3fd5685c5e3e6",
"size": "658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "searchengine/search/probability_search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62370"
}
],
"symlink_target": ""
} |
from sugar3.activity import bundlebuilder
bundlebuilder.start()
| {
"content_hash": "b04ff093401018a3a3d62e2e43a5bf0e",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 41,
"avg_line_length": 32,
"alnum_prop": 0.859375,
"repo_name": "MrNex/Matter.py",
"id": "5dc0ff42becda414545503ab7897d49749c7925b",
"size": "824",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59294"
},
{
"name": "Shell",
"bytes": "1797"
}
],
"symlink_target": ""
} |
"""VRNN classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import functools
import sonnet as snt
import tensorflow as tf
from fivo.models import base
VRNNState = namedtuple("VRNNState", "rnn_state latent_encoded")
class VRNN(object):
"""Implementation of a Variational Recurrent Neural Network (VRNN).
Introduced in "A Recurrent Latent Variable Model for Sequential data"
by Chung et al. https://arxiv.org/pdf/1506.02216.pdf.
The VRNN is a sequence model similar to an RNN that uses stochastic latent
variables to improve its representational power. It can be thought of as a
sequential analogue to the variational auto-encoder (VAE).
The VRNN has a deterministic RNN as its backbone, represented by the
sequence of RNN hidden states h_t. At each timestep, the RNN hidden state h_t
is conditioned on the previous sequence element, x_{t-1}, as well as the
latent state from the previous timestep, z_{t-1}.
In this implementation of the VRNN the latent state z_t is Gaussian. The
model's prior over z_t (also called the transition distribution) is
distributed as Normal(mu_t, diag(sigma_t^2)) where mu_t and sigma_t are the
mean and standard deviation output from a fully connected network that accepts
the rnn hidden state h_t as input.
The emission distribution p(x_t|z_t, h_t) is conditioned on the latent state
z_t as well as the current RNN hidden state h_t via a fully connected network.
To increase the modeling power of the VRNN, two additional networks are
used to extract features from the data and the latent state. Those networks
are called data_encoder and latent_encoder respectively.
For an example of how to call the VRNN's methods see sample_step.
There are a few differences between this exposition and the paper.
  First, the indexing scheme for h_t is different from the paper's -- what the
paper calls h_t we call h_{t+1}. This is the same notation used by Fraccaro
et al. to describe the VRNN in the paper linked above. Also, the VRNN paper
uses VAE terminology to refer to the different internal networks, so it
refers to the emission distribution as the decoder. This implementation also
renames the functions phi_x and phi_z in the paper to data_encoder and
latent_encoder.
"""
def __init__(self,
rnn_cell,
data_encoder,
latent_encoder,
transition,
emission,
random_seed=None):
"""Create a VRNN.
Args:
rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will form the
deterministic backbone of the VRNN. The inputs to the RNN will be the
encoded latent state of the previous timestep with shape
[batch_size, encoded_latent_size] as well as the encoded input of the
current timestep, a Tensor of shape [batch_size, encoded_data_size].
data_encoder: A callable that accepts a batch of data x_t and
'encodes' it, e.g. runs it through a fully connected network. Must
accept as argument the inputs x_t, a Tensor of the shape
[batch_size, data_size] and return a Tensor of shape
[batch_size, encoded_data_size]. This callable will be called multiple
times in the VRNN cell so if scoping is not handled correctly then
multiple copies of the variables in this network could be made. It is
recommended to use a snt.nets.MLP module, which takes care of this for
you.
latent_encoder: A callable that accepts a latent state z_t and
'encodes' it, e.g. runs it through a fully connected network. Must
accept as argument a Tensor of shape [batch_size, latent_size] and
return a Tensor of shape [batch_size, encoded_latent_size].
This callable must also have the property 'output_size' defined,
returning encoded_latent_size.
transition: A callable that implements the transition distribution
p(z_t|h_t). Must accept as argument the previous RNN hidden state and
return a tf.distributions.Normal distribution conditioned on the input.
emission: A callable that implements the emission distribution
p(x_t|z_t, h_t). Must accept as arguments the encoded latent state
and the RNN hidden state and return a subclass of
tf.distributions.Distribution that can be used to evaluate the logprob
of the targets.
random_seed: The seed for the random ops. Sets the seed for sample_step.
"""
self.random_seed = random_seed
self.rnn_cell = rnn_cell
self.data_encoder = data_encoder
self.latent_encoder = latent_encoder
self.encoded_z_size = latent_encoder.output_size
self.state_size = (self.rnn_cell.state_size)
self._transition = transition
self._emission = emission
def zero_state(self, batch_size, dtype):
"""The initial state of the VRNN.
Contains the initial state of the RNN and the inital encoded latent.
Args:
batch_size: The batch size.
dtype: The data type of the VRNN.
Returns:
zero_state: The initial state of the VRNN.
"""
return VRNNState(
rnn_state=self.rnn_cell.zero_state(batch_size, dtype),
latent_encoded=tf.zeros(
[batch_size, self.latent_encoder.output_size], dtype=dtype))
def run_rnn(self, prev_rnn_state, prev_latent_encoded, inputs):
"""Runs the deterministic RNN for one step.
Args:
prev_rnn_state: The state of the RNN from the previous timestep.
prev_latent_encoded: Float Tensor of shape
[batch_size, encoded_latent_size], the previous latent state z_{t-1}
run through latent_encoder.
inputs: A Tensor of shape [batch_size, data_size], the current inputs to
the model. Most often this is x_{t-1}, the previous token in the
observation sequence.
Returns:
rnn_out: The output of the RNN.
rnn_state: The new state of the RNN.
"""
inputs_encoded = self.data_encoder(tf.to_float(inputs))
rnn_inputs = tf.concat([inputs_encoded, prev_latent_encoded], axis=1)
rnn_out, rnn_state = self.rnn_cell(rnn_inputs, prev_rnn_state)
return rnn_out, rnn_state
def transition(self, rnn_out):
"""Computes the transition distribution p(z_t|h_t).
Note that p(z_t | h_t) = p(z_t| z_{1:t-1}, x_{1:t-1})
Args:
rnn_out: The output of the rnn for the current timestep.
Returns:
p(z_t | h_t): A normal distribution with event shape
[batch_size, latent_size].
"""
return self._transition(rnn_out)
def emission(self, latent, rnn_out):
"""Computes the emission distribution p(x_t | z_t, h_t).
Note that p(x_t | z_t, h_t) = p(x_t | z_{1:t}, x_{1:t-1}).
Args:
latent: The stochastic latent state z_t.
rnn_out: The output of the rnn for the current timestep.
Returns:
p(x_t | z_t, h_t): A distribution with event shape
[batch_size, data_size].
latent_encoded: The latent state encoded with latent_encoder. Should be
passed to run_rnn on the next timestep.
"""
latent_encoded = self.latent_encoder(latent)
return self._emission(latent_encoded, rnn_out), latent_encoded
def sample_step(self, prev_state, inputs, unused_t):
"""Samples one output from the model.
Args:
prev_state: The previous state of the model, a VRNNState containing the
previous rnn state and the previous encoded latent.
inputs: A Tensor of shape [batch_size, data_size], the current inputs to
the model. Most often this is x_{t-1}, the previous token in the
observation sequence.
unused_t: The current timestep. Not used currently.
Returns:
new_state: The next state of the model, a VRNNState.
xt: A float Tensor of shape [batch_size, data_size], an output sampled
from the emission distribution.
"""
rnn_out, rnn_state = self.run_rnn(prev_state.rnn_state,
prev_state.latent_encoded,
inputs)
p_zt = self.transition(rnn_out)
zt = p_zt.sample(seed=self.random_seed)
p_xt_given_zt, latent_encoded = self.emission(zt, rnn_out)
xt = p_xt_given_zt.sample(seed=self.random_seed)
new_state = VRNNState(rnn_state=rnn_state, latent_encoded=latent_encoded)
return new_state, tf.to_float(xt)
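# Illustrative construction sketch (not used elsewhere in this module): it wires a
# VRNN together from the kinds of components the constructor docstring asks for.
# All sizes, layer widths and the Bernoulli emission below are arbitrary choices
# made for the example, not the configuration used in the accompanying experiments.
def _example_build_vrnn(data_size=88, latent_size=32, hidden_size=64):
  data_encoder = snt.nets.MLP([hidden_size], name="data_encoder")
  latent_encoder = snt.nets.MLP([hidden_size], name="latent_encoder")
  # Sonnet modules reuse their variables across calls, so the parameter networks
  # below are shared every time transition/emission are invoked.
  transition_net = snt.nets.MLP([hidden_size, 2 * latent_size], name="transition")
  emission_net = snt.nets.MLP([hidden_size, data_size], name="emission")

  def transition(rnn_out):
    # p(z_t | h_t): a diagonal Gaussian whose parameters come from the RNN output.
    loc, pre_scale = tf.split(transition_net(rnn_out), 2, axis=1)
    return tf.distributions.Normal(loc=loc, scale=tf.nn.softplus(pre_scale) + 1e-5)

  def emission(latent_encoded, rnn_out):
    # p(x_t | z_t, h_t): a Bernoulli over binary observations.
    logits = emission_net(tf.concat([latent_encoded, rnn_out], axis=1))
    return tf.distributions.Bernoulli(logits=logits)

  rnn_cell = tf.nn.rnn_cell.LSTMCell(hidden_size)
  return VRNN(rnn_cell, data_encoder, latent_encoder, transition, emission)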
# pylint: disable=invalid-name
# pylint thinks this is a top-level constant.
TrainableVRNNState = namedtuple("TrainableVRNNState",
VRNNState._fields + ("rnn_out",))
# pylint: enable=invalid-name
class TrainableVRNN(VRNN, base.ELBOTrainableSequenceModel):
"""A VRNN subclass with proposals and methods for training and evaluation.
This class adds proposals used for training with importance-sampling based
methods such as the ELBO. The model can be configured to propose from one
of three proposals: a learned filtering proposal, a learned smoothing
proposal, or the prior (i.e. the transition distribution).
As described in the VRNN paper, the learned filtering proposal is
parameterized by a fully connected neural network that accepts as input the
current target x_t and the current rnn output h_t. The learned smoothing
proposal is also given the hidden state of an RNN run in reverse over the
inputs, so as to incorporate information about future observations. This
smoothing proposal is not described in the VRNN paper.
All learned proposals use the 'res_q' parameterization, meaning that instead
of directly producing the mean of z_t, the proposal network predicts the
'residual' from the prior's mean. This is explored more in section 3.3 of
https://arxiv.org/pdf/1605.07571.pdf.
During training, the latent state z_t is sampled from the proposal and the
reparameterization trick is used to provide low-variance gradients.
Note that the VRNN paper uses VAE terminology to refer to the different
internal networks, so the proposal is referred to as the encoder.
"""
def __init__(self,
rnn_cell,
data_encoder,
latent_encoder,
transition,
emission,
proposal_type,
proposal=None,
rev_rnn_cell=None,
tilt=None,
random_seed=None):
"""Create a trainable RNN.
Args:
rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will form the
deterministic backbone of the VRNN. The inputs to the RNN will be the
encoded latent state of the previous timestep with shape
[batch_size, encoded_latent_size] as well as the encoded input of the
current timestep, a Tensor of shape [batch_size, encoded_data_size].
data_encoder: A callable that accepts a batch of data x_t and
'encodes' it, e.g. runs it through a fully connected network. Must
accept as argument the inputs x_t, a Tensor of the shape
[batch_size, data_size] and return a Tensor of shape
[batch_size, encoded_data_size]. This callable will be called multiple
times in the VRNN cell so if scoping is not handled correctly then
multiple copies of the variables in this network could be made. It is
recommended to use a snt.nets.MLP module, which takes care of this for
you.
latent_encoder: A callable that accepts a latent state z_t and
'encodes' it, e.g. runs it through a fully connected network. Must
accept as argument a Tensor of shape [batch_size, latent_size] and
return a Tensor of shape [batch_size, encoded_latent_size].
This callable must also have the property 'output_size' defined,
returning encoded_latent_size.
transition: A callable that implements the transition distribution
p(z_t|h_t). Must accept as argument the previous RNN hidden state and
return a tf.distributions.Normal distribution conditioned on the input.
emission: A callable that implements the emission distribution
p(x_t|z_t, h_t). Must accept as arguments the encoded latent state
and the RNN hidden state and return a subclass of
tf.distributions.Distribution that can be used to evaluate the logprob
of the targets.
proposal_type: A string indicating the type of proposal to use. Can
be either "filtering", "smoothing", or "prior". When proposal_type is
"filtering" or "smoothing", proposal must be provided. When
proposal_type is "smoothing", rev_rnn_cell must also be provided.
proposal: A callable that implements the proposal q(z_t| h_t, x_{1:T}).
If proposal_type is "filtering" then proposal must accept as arguments
the current rnn output, the encoded target of the current timestep,
and the mean of the prior. If proposal_type is "smoothing" then
in addition to the current rnn output and the mean of the prior
proposal must accept as arguments the output of the reverse rnn.
proposal should return a tf.distributions.Normal distribution
conditioned on its inputs. If proposal_type is "prior" this argument is
ignored.
rev_rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will aggregate
observation statistics in the reverse direction. The inputs to the RNN
will be the encoded reverse input of the current timestep, a Tensor of
shape [batch_size, encoded_data_size].
tilt: A callable that implements the log of a positive tilting function
        (ideally approximating log p(x_{t+1}|z_t, h_t)). Must accept as arguments
the encoded latent state and the RNN hidden state and return a subclass
of tf.distributions.Distribution that can be used to evaluate the
        logprob of x_{t+1}. Optionally None, in which case no tilt is used.
random_seed: The seed for the random ops. Sets the seed for sample_step
and __call__.
"""
super(TrainableVRNN, self).__init__(
rnn_cell, data_encoder, latent_encoder,
transition, emission, random_seed=random_seed)
self.rev_rnn_cell = rev_rnn_cell
self._tilt = tilt
assert proposal_type in ["filtering", "smoothing", "prior"]
self._proposal = proposal
self.proposal_type = proposal_type
if proposal_type != "prior":
assert proposal, "If not proposing from the prior, must provide proposal."
if proposal_type == "smoothing":
assert rev_rnn_cell, "Must provide rev_rnn_cell for smoothing proposal."
def zero_state(self, batch_size, dtype):
super_state = super(TrainableVRNN, self).zero_state(batch_size, dtype)
return TrainableVRNNState(
rnn_out=tf.zeros([batch_size, self.rnn_cell.output_size], dtype=dtype),
**super_state._asdict())
def set_observations(self, observations, seq_lengths):
"""Stores the model's observations.
    Stores the observations (inputs and targets) in TensorArrays and precomputes
    quantities used later, such as the reverse RNN output and the encoded targets.
Args:
observations: The observations of the model, a tuple containing two
Tensors of shape [max_seq_len, batch_size, data_size]. The Tensors
should be the inputs and targets, respectively.
seq_lengths: An int Tensor of shape [batch_size] containing the length
of each sequence in observations.
"""
inputs, targets = observations
self.seq_lengths = seq_lengths
self.max_seq_len = tf.reduce_max(seq_lengths)
self.inputs_ta = base.ta_for_tensor(inputs, clear_after_read=False)
self.targets_ta = base.ta_for_tensor(targets, clear_after_read=False)
targets_encoded = base.encode_all(targets, self.data_encoder)
self.targets_encoded_ta = base.ta_for_tensor(targets_encoded,
clear_after_read=False)
if self.rev_rnn_cell:
reverse_targets_encoded = tf.reverse_sequence(
targets_encoded, seq_lengths, seq_axis=0, batch_axis=1)
# Compute the reverse rnn over the targets.
reverse_rnn_out, _ = tf.nn.dynamic_rnn(self.rev_rnn_cell,
reverse_targets_encoded,
time_major=True,
dtype=tf.float32)
reverse_rnn_out = tf.reverse_sequence(reverse_rnn_out, seq_lengths,
seq_axis=0, batch_axis=1)
self.reverse_rnn_ta = base.ta_for_tensor(reverse_rnn_out,
clear_after_read=False)
def _filtering_proposal(self, rnn_out, prior, t):
"""Computes the filtering proposal distribution."""
return self._proposal(rnn_out,
self.targets_encoded_ta.read(t),
prior_mu=prior.mean())
def _smoothing_proposal(self, rnn_out, prior, t):
"""Computes the smoothing proposal distribution."""
return self._proposal(rnn_out,
smoothing_tensors=[self.reverse_rnn_ta.read(t)],
prior_mu=prior.mean())
def proposal(self, rnn_out, prior, t):
"""Computes the proposal distribution specified by proposal_type.
Args:
rnn_out: The output of the rnn for the current timestep.
prior: A tf.distributions.Normal distribution representing the prior
over z_t, p(z_t | z_{1:t-1}, x_{1:t-1}). Used for 'res_q'.
t: A scalar int Tensor, the current timestep.
"""
if self.proposal_type == "filtering":
return self._filtering_proposal(rnn_out, prior, t)
elif self.proposal_type == "smoothing":
return self._smoothing_proposal(rnn_out, prior, t)
elif self.proposal_type == "prior":
return self.transition(rnn_out)
def tilt(self, rnn_out, latent_encoded, targets):
r_func = self._tilt(rnn_out, latent_encoded)
return tf.reduce_sum(r_func.log_prob(targets), axis=-1)
def propose_and_weight(self, state, t):
"""Runs the model and computes importance weights for one timestep.
Runs the model and computes importance weights, sampling from the proposal
instead of the transition/prior.
Args:
state: The previous state of the model, a TrainableVRNNState containing
the previous rnn state, the previous rnn outs, and the previous encoded
latent.
t: A scalar integer Tensor, the current timestep.
Returns:
weights: A float Tensor of shape [batch_size].
new_state: The new state of the model.
"""
inputs = self.inputs_ta.read(t)
targets = self.targets_ta.read(t)
rnn_out, next_rnn_state = self.run_rnn(state.rnn_state,
state.latent_encoded,
inputs)
p_zt = self.transition(rnn_out)
q_zt = self.proposal(rnn_out, p_zt, t)
zt = q_zt.sample(seed=self.random_seed)
p_xt_given_zt, latent_encoded = self.emission(zt, rnn_out)
log_p_xt_given_zt = tf.reduce_sum(p_xt_given_zt.log_prob(targets), axis=-1)
log_p_zt = tf.reduce_sum(p_zt.log_prob(zt), axis=-1)
log_q_zt = tf.reduce_sum(q_zt.log_prob(zt), axis=-1)
weights = log_p_zt + log_p_xt_given_zt - log_q_zt
if self._tilt:
prev_log_r = tf.cond(
tf.greater(t, 0),
lambda: self.tilt(state.rnn_out, state.latent_encoded, targets),
lambda: 0.) # On the first step, prev_log_r = 0.
log_r = tf.cond(
tf.less(t + 1, self.max_seq_len),
lambda: self.tilt(rnn_out, latent_encoded, self.targets_ta.read(t+1)),
lambda: 0.)
# On the last step, log_r = 0.
log_r *= tf.to_float(t < self.seq_lengths - 1)
weights += log_r - prev_log_r
new_state = TrainableVRNNState(rnn_state=next_rnn_state,
rnn_out=rnn_out,
latent_encoded=latent_encoded)
return weights, new_state
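# For reference, the incremental log-weight computed by propose_and_weight
# above is (notation as in the docstrings of this file):
#
#   log w_t = log p(z_t | h_t) + log p(x_t | z_t, h_t) - log q(z_t | ...)
#             + log r(x_{t+1} | z_t, h_t) - log r(x_t | z_{t-1}, h_{t-1})
#
# where the r(.) terms appear only when a tilting function is configured,
# prev_log_r is defined to be 0 on the first step, and log_r is zeroed on the
# last step of each sequence.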
_DEFAULT_INITIALIZERS = {"w": tf.contrib.layers.xavier_initializer(),
"b": tf.zeros_initializer()}
def create_vrnn(
data_size,
latent_size,
emission_class,
rnn_hidden_size=None,
fcnet_hidden_sizes=None,
encoded_data_size=None,
encoded_latent_size=None,
sigma_min=0.0,
raw_sigma_bias=0.25,
emission_bias_init=0.0,
use_tilt=False,
proposal_type="filtering",
initializers=None,
random_seed=None):
"""A factory method for creating VRNN cells.
Args:
data_size: The dimension of the vectors that make up the data sequences.
latent_size: The size of the stochastic latent state of the VRNN.
emission_class: The class of the emission distribution. Can be either
ConditionalNormalDistribution or ConditionalBernoulliDistribution.
rnn_hidden_size: The hidden state dimension of the RNN that forms the
deterministic part of this VRNN. If None, then it defaults
to latent_size.
fcnet_hidden_sizes: A list of python integers, the size of the hidden
layers of the fully connected networks that parameterize the conditional
distributions of the VRNN. If None, then it defaults to one hidden
layer of size latent_size.
encoded_data_size: The size of the output of the data encoding network. If
None, defaults to latent_size.
encoded_latent_size: The size of the output of the latent state encoding
network. If None, defaults to latent_size.
sigma_min: The minimum value that the standard deviation of the
distribution over the latent state can take.
raw_sigma_bias: A scalar that is added to the raw standard deviation
output from the neural networks that parameterize the prior and
approximate posterior. Useful for preventing standard deviations close
to zero.
    emission_bias_init: A bias added to the raw output of the fully
      connected network that parameterizes the emission distribution. Useful
      for initializing the mean of the distribution to a sensible starting point
such as the mean of the training data. Only used with Bernoulli generative
distributions.
use_tilt: If true, create a VRNN with a tilting function.
proposal_type: The type of proposal to use. Can be "filtering", "smoothing",
or "prior".
    initializers: The variable initializers to use for the fully connected
networks and RNN cell. Must be a dictionary mapping the keys 'w' and 'b'
to the initializers for the weights and biases. Defaults to xavier for
the weights and zeros for the biases when initializers is None.
random_seed: A random seed for the VRNN resampling operations.
Returns:
model: A TrainableVRNN object.
"""
if rnn_hidden_size is None:
rnn_hidden_size = latent_size
if fcnet_hidden_sizes is None:
fcnet_hidden_sizes = [latent_size]
if encoded_data_size is None:
encoded_data_size = latent_size
if encoded_latent_size is None:
encoded_latent_size = latent_size
if initializers is None:
initializers = _DEFAULT_INITIALIZERS
data_encoder = snt.nets.MLP(
output_sizes=fcnet_hidden_sizes + [encoded_data_size],
initializers=initializers,
name="data_encoder")
latent_encoder = snt.nets.MLP(
output_sizes=fcnet_hidden_sizes + [encoded_latent_size],
initializers=initializers,
name="latent_encoder")
transition = base.ConditionalNormalDistribution(
size=latent_size,
hidden_layer_sizes=fcnet_hidden_sizes,
sigma_min=sigma_min,
raw_sigma_bias=raw_sigma_bias,
initializers=initializers,
name="prior")
# Construct the emission distribution.
if emission_class == base.ConditionalBernoulliDistribution:
# For Bernoulli distributed outputs, we initialize the bias so that the
# network generates on average the mean from the training set.
emission_dist = functools.partial(base.ConditionalBernoulliDistribution,
bias_init=emission_bias_init)
else:
emission_dist = base.ConditionalNormalDistribution
emission = emission_dist(
size=data_size,
hidden_layer_sizes=fcnet_hidden_sizes,
initializers=initializers,
name="generative")
# Construct the proposal distribution.
if proposal_type in ["filtering", "smoothing"]:
proposal = base.NormalApproximatePosterior(
size=latent_size,
hidden_layer_sizes=fcnet_hidden_sizes,
sigma_min=sigma_min,
raw_sigma_bias=raw_sigma_bias,
initializers=initializers,
smoothing=(proposal_type == "smoothing"),
name="approximate_posterior")
else:
proposal = None
if use_tilt:
tilt = emission_dist(
size=data_size,
hidden_layer_sizes=fcnet_hidden_sizes,
initializers=initializers,
name="tilt")
else:
tilt = None
rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_hidden_size,
initializer=initializers["w"])
rev_rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_hidden_size,
initializer=initializers["w"])
return TrainableVRNN(
rnn_cell, data_encoder, latent_encoder, transition,
emission, proposal_type, proposal=proposal, rev_rnn_cell=rev_rnn_cell,
tilt=tilt, random_seed=random_seed)
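# A minimal usage sketch; the sizes, tensors, and batch_size below are
# illustrative placeholders rather than values from this module:
#
#   model = create_vrnn(data_size=88,
#                       latent_size=32,
#                       emission_class=base.ConditionalNormalDistribution,
#                       proposal_type="filtering")
#   # inputs/targets: float Tensors of shape [max_seq_len, batch_size, 88],
#   # seq_lengths: an int Tensor of shape [batch_size].
#   model.set_observations((inputs, targets), seq_lengths)
#   state = model.zero_state(batch_size, tf.float32)
#   weights, state = model.propose_and_weight(state, t=0)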
| {
"content_hash": "524ddc4319a47e6ccaee608c0b0c8afd",
"timestamp": "",
"source": "github",
"line_count": 557,
"max_line_length": 80,
"avg_line_length": 45.50448833034111,
"alnum_prop": 0.6724927010179121,
"repo_name": "tombstone/models",
"id": "4e2552088c19f141a75d791d2be0d0a5238ed87c",
"size": "26035",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "research/fivo/fivo/models/vrnn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1365199"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "1858048"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Python",
"bytes": "7241242"
},
{
"name": "Shell",
"bytes": "102270"
},
{
"name": "TypeScript",
"bytes": "6515"
}
],
"symlink_target": ""
} |
import os
import shutil
from errno import ENOENT, EEXIST
import hashlib
import sys
from os.path import abspath, realpath, join as joinpath
import platform
import re
import six
from conans.util.log import logger
import tarfile
import stat
def make_read_only(path):
for root, _, files in os.walk(path):
for f in files:
full_path = os.path.join(root, f)
mode = os.stat(full_path).st_mode
os.chmod(full_path, mode & ~ stat.S_IWRITE)
_DIRTY_FOLDER = ".dirty"
def set_dirty(folder):
dirty_file = os.path.normpath(folder) + _DIRTY_FOLDER
save(dirty_file, "")
def clean_dirty(folder):
dirty_file = os.path.normpath(folder) + _DIRTY_FOLDER
os.remove(dirty_file)
def is_dirty(folder):
dirty_file = os.path.normpath(folder) + _DIRTY_FOLDER
return os.path.exists(dirty_file)
def decode_text(text):
decoders = ["utf-8", "Windows-1252"]
for decoder in decoders:
try:
return text.decode(decoder)
except UnicodeDecodeError:
continue
logger.warn("can't decode %s" % str(text))
    return text.decode("utf-8", "ignore")  # Ignore incompatible characters
def touch(fname, times=None):
os.utime(fname, times)
def normalize(text):
if platform.system() == "Windows":
return re.sub("\r?\n", "\r\n", text)
else:
return text
def md5(content):
md5alg = hashlib.md5()
if isinstance(content, bytes):
tmp = content
else:
tmp = content.encode()
md5alg.update(tmp)
return md5alg.hexdigest()
def md5sum(file_path):
return _generic_algorithm_sum(file_path, "md5")
def sha1sum(file_path):
return _generic_algorithm_sum(file_path, "sha1")
def sha256sum(file_path):
return _generic_algorithm_sum(file_path, "sha256")
def _generic_algorithm_sum(file_path, algorithm_name):
with open(file_path, 'rb') as fh:
m = hashlib.new(algorithm_name)
while True:
data = fh.read(8192)
if not data:
break
m.update(data)
return m.hexdigest()
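# A small usage sketch (the path and expected digest are illustrative): these
# helpers stream the file in 8 KB chunks, so they are safe for large packages.
#
#   digest = sha256sum("downloads/mylib-1.0.tgz")
#   if digest != expected_sha256:
#       raise ValueError("checksum mismatch for mylib-1.0.tgz")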
def save(path, content, append=False):
"""
    Saves a file with the given content.
    Params:
        path: path to write the file to
        content: contents to save in the file
"""
try:
os.makedirs(os.path.dirname(path))
except:
pass
mode = 'wb' if not append else 'ab'
with open(path, mode) as handle:
handle.write(to_file_bytes(content))
def to_file_bytes(content):
if six.PY3:
if not isinstance(content, bytes):
content = bytes(content, "utf-8")
return content
def save_files(path, files):
for name, content in list(files.items()):
save(os.path.join(path, name), content)
def load(path, binary=False):
'''Loads a file content'''
with open(path, 'rb') as handle:
tmp = handle.read()
return tmp if binary else decode_text(tmp)
def relative_dirs(path):
''' Walks a dir and return a list with the relative paths '''
ret = []
for dirpath, _, fnames in os.walk(path):
for filename in fnames:
tmp = os.path.join(dirpath, filename)
tmp = tmp[len(path) + 1:]
ret.append(tmp)
return ret
def _change_permissions(func, path, exc_info):
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
def rmdir(path):
try:
shutil.rmtree(path, onerror=_change_permissions)
except OSError as err:
if err.errno == ENOENT:
return
raise
def mkdir(path):
"""Recursive mkdir, doesnt fail if already existing"""
try:
os.makedirs(path)
except OSError as err:
if err.errno != EEXIST:
raise
def path_exists(path, basedir):
"""Case sensitive, for windows, optional
basedir for skip caps check for tmp folders in testing for example (returned always
in lowercase for some strange reason)"""
exists = os.path.exists(path)
    if not exists or sys.platform.startswith("linux"):
return exists
path = os.path.normpath(path)
path = os.path.relpath(path, basedir)
chunks = path.split(os.sep)
tmp = basedir
for chunk in chunks:
if chunk and chunk not in os.listdir(tmp):
return False
tmp = os.path.normpath(tmp + os.sep + chunk)
return True
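# For example, on a case-insensitive filesystem (paths illustrative):
#
#   path_exists("C:/base/Hello/recipe.py", "C:/base")
#
# returns False when the folder on disk is actually named "hello", even though
# os.path.exists() alone reports True, because every path chunk below basedir
# is compared against the real directory listing.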
def gzopen_without_timestamps(name, mode="r", fileobj=None, compresslevel=None, **kwargs):
""" !! Method overrided by laso to pass mtime=0 (!=None) to avoid time.time() was
setted in Gzip file causing md5 to change. Not possible using the
previous tarfile open because arguments are not passed to GzipFile constructor
"""
from tarfile import CompressionError, ReadError
compresslevel = compresslevel or int(os.getenv("CONAN_COMPRESSION_LEVEL", 9))
if mode not in ("r", "w"):
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
try:
fileobj = gzip.GzipFile(name, mode, compresslevel, fileobj, mtime=0)
except OSError:
if fileobj is not None and mode == 'r':
raise ReadError("not a gzip file")
raise
try:
t = tarfile.TarFile.taropen(name, mode, fileobj, **kwargs)
except IOError:
fileobj.close()
if mode == 'r':
raise ReadError("not a gzip file")
raise
except:
fileobj.close()
raise
t._extfileobj = False
return t
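# A usage sketch (file names illustrative): writing a .tgz whose gzip header
# carries no timestamp, so re-compressing identical tar content gives
# byte-identical output and a stable md5.
#
#   tgz = gzopen_without_timestamps("conan_package.tgz", mode="w")
#   tgz.add("conanfile.py", arcname="conanfile.py")
#   tgz.close()
#
# Only the gzip-level time.time() stamp is suppressed; member mtimes inside
# the tar still come from the files themselves.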
def tar_extract(fileobj, destination_dir):
"""Extract tar file controlling not absolute paths and fixing the routes
if the tar was zipped in windows"""
def badpath(path, base):
# joinpath will ignore base if path is absolute
return not realpath(abspath(joinpath(base, path))).startswith(base)
def safemembers(members):
base = realpath(abspath("."))
for finfo in members:
if badpath(finfo.name, base) or finfo.islnk():
continue
else:
                # Fixes extracting a Windows-created archive on Linux
finfo.name = finfo.name.replace("\\", "/")
yield finfo
the_tar = tarfile.open(fileobj=fileobj)
    # NOTE: errorlevel=2 has been removed because it failed on Windows 10: with
    # mtime=0 it raised "could not change modification time" errors.
    # the_tar.errorlevel = 2  # raise an exception on any error
the_tar.extractall(path=destination_dir, members=safemembers(the_tar))
the_tar.close()
def list_folder_subdirs(basedir, level):
ret = []
for root, dirs, _ in os.walk(basedir):
rel_path = os.path.relpath(root, basedir)
if rel_path == ".":
continue
dir_split = rel_path.split(os.sep)
if len(dir_split) == level:
ret.append("/".join(dir_split))
            dirs[:] = []  # Stop iterating into subdirs
return ret
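# A worked example (layout illustrative): for a tree laid out as
# basedir/pkg/1.0/user/channel/..., list_folder_subdirs(basedir, level=4)
# returns ["pkg/1.0/user/channel"], while level=2 returns ["pkg/1.0"]. Only
# paths exactly `level` directories deep are collected, and the walk does not
# descend below them.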
def exception_message_safe(exc):
try:
return str(exc)
except:
return decode_text(repr(exc))
| {
"content_hash": "2718381690dd39b9ec0b9b7563044498",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 97,
"avg_line_length": 26.34926470588235,
"alnum_prop": 0.614762104088182,
"repo_name": "lasote/conan",
"id": "c5e5ae5c72122c9d9313d6daa4bd527fa5baba37",
"size": "7167",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "conans/util/files.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1124"
},
{
"name": "Python",
"bytes": "2480006"
},
{
"name": "Shell",
"bytes": "1912"
}
],
"symlink_target": ""
} |
import unittest
import nengo
import time
import numpy as np
import matplotlib.pyplot as plt
from brainspawn.simulator.sim_manager import SimManager
from brainspawn.sample_networks.two_dimensional_rep import model, sin, cos, neurons
class SimManagerTests(unittest.TestCase):
"""Test cases for SimManager
"""
#TODO - Mike really needs to add real tests here...
def test_simulator(self):
"""Runs simulator
Not really a unit test, more of an integration test
and/or demo of the simulator
"""
# Init the sim_manager with a basic model
sim_manager = SimManager()
sim_manager.load_new_model(model, 0.001)
#Assert model contains expected nodes, connections?
# What graphs can I show for some Ensemble?
node_caps = sim_manager.get_caps_for_obj(neurons)
for cap in node_caps:
print (cap.name, cap.get_out_dimensions(neurons))
            if cap.name == 'output':
out_cap = cap
assert(out_cap)
# Okay, show a basic xy plot
plt.ion()
p1,p2 = plt.plot([], np.empty((0, 2)))
text = []
# Create update function, subscribe it
def update_graph(data, start_time):
start = start_time/sim_manager.dt
count = sim_manager.current_step - sim_manager.min_step
t = np.linspace(start, start + data[start:count].shape[0]*sim_manager.dt, data[start:count].shape[0])
p1.set_xdata(t)
p1.set_ydata(data[start:count,:1])
p2.set_xdata(t)
p2.set_ydata(data[start:count,1:])
if(not text and len(t) > 10):
text.append(plt.text(0.2, 0.67, "such line", fontsize=18, color='orange'))
text.append(plt.text(0.7, 0.5, "very neuron", fontsize=18, color='green'))
text.append(plt.text(0.5, 0.2, "wow", fontsize=18, color='purple'))
elif (text and len(t) < 275):
for txt in text:
txt.set_x((txt.get_position()[0]*100000 + len(t))/100000 % 1 )
plt.draw()
sim_manager.connect_to_obj(neurons, out_cap, update_graph)
# Run the simulation for a bit
for i in range(1000):
time.sleep(.001)
sim_manager.step()
# Hey, everything worked
assert(True)
def main():
unittest.main()
if __name__ == '__main__':
main()
| {
"content_hash": "ab98c303af3add8249ac2926d7bc6e4e",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 113,
"avg_line_length": 32.945945945945944,
"alnum_prop": 0.5787530762920426,
"repo_name": "chairmanmeow50/Brainspawn",
"id": "c0174e34c629ca26b496bf8a6f69643fb7961215",
"size": "2438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "brainspawn/tests/sim_manager_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Puppet",
"bytes": "1577"
},
{
"name": "Python",
"bytes": "144441"
},
{
"name": "Shell",
"bytes": "7348"
}
],
"symlink_target": ""
} |
import errno
import inspect
import os
import sys
from contextlib import contextmanager
from functools import update_wrapper
from itertools import repeat
from ._unicodefun import _verify_python_env
from .exceptions import Abort
from .exceptions import BadParameter
from .exceptions import ClickException
from .exceptions import Exit
from .exceptions import MissingParameter
from .exceptions import UsageError
from .formatting import HelpFormatter
from .formatting import join_options
from .globals import pop_context
from .globals import push_context
from .parser import OptionParser
from .parser import split_opt
from .termui import confirm
from .termui import prompt
from .termui import style
from .types import BOOL
from .types import convert_type
from .types import IntRange
from .utils import echo
from .utils import make_default_short_help
from .utils import make_str
from .utils import PacifyFlushWrapper
_missing = object()
SUBCOMMAND_METAVAR = "COMMAND [ARGS]..."
SUBCOMMANDS_METAVAR = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..."
DEPRECATED_HELP_NOTICE = " (DEPRECATED)"
DEPRECATED_INVOKE_NOTICE = "DeprecationWarning: The command {name} is deprecated."
def _maybe_show_deprecated_notice(cmd):
if cmd.deprecated:
echo(style(DEPRECATED_INVOKE_NOTICE.format(name=cmd.name), fg="red"), err=True)
def fast_exit(code):
"""Exit without garbage collection, this speeds up exit by about 10ms for
things like bash completion.
"""
sys.stdout.flush()
sys.stderr.flush()
os._exit(code)
def _bashcomplete(cmd, prog_name, complete_var=None):
"""Internal handler for the bash completion support."""
if complete_var is None:
complete_var = f"_{prog_name}_COMPLETE".replace("-", "_").upper()
complete_instr = os.environ.get(complete_var)
if not complete_instr:
return
from ._bashcomplete import bashcomplete
if bashcomplete(cmd, prog_name, complete_var, complete_instr):
fast_exit(1)
def _check_multicommand(base_command, cmd_name, cmd, register=False):
if not base_command.chain or not isinstance(cmd, MultiCommand):
return
if register:
hint = (
"It is not possible to add multi commands as children to"
" another multi command that is in chain mode."
)
else:
hint = (
"Found a multi command as subcommand to a multi command"
" that is in chain mode. This is not supported."
)
raise RuntimeError(
f"{hint}. Command {base_command.name!r} is set to chain and"
f" {cmd_name!r} was added as a subcommand but it in itself is a"
f" multi command. ({cmd_name!r} is a {type(cmd).__name__}"
f" within a chained {type(base_command).__name__} named"
f" {base_command.name!r})."
)
def batch(iterable, batch_size):
return list(zip(*repeat(iter(iterable), batch_size)))
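# A quick worked example of what batch() yields (values illustrative):
#   batch([1, 2, 3, 4, 5, 6], 2)  ->  [(1, 2), (3, 4), (5, 6)]
# The same iterator is consumed batch_size times by zip(), so any trailing
# items that do not fill a complete batch are silently dropped.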
@contextmanager
def augment_usage_errors(ctx, param=None):
"""Context manager that attaches extra information to exceptions."""
try:
yield
except BadParameter as e:
if e.ctx is None:
e.ctx = ctx
if param is not None and e.param is None:
e.param = param
raise
except UsageError as e:
if e.ctx is None:
e.ctx = ctx
raise
def iter_params_for_processing(invocation_order, declaration_order):
"""Given a sequence of parameters in the order as should be considered
for processing and an iterable of parameters that exist, this returns
a list in the correct order as they should be processed.
"""
def sort_key(item):
try:
idx = invocation_order.index(item)
except ValueError:
idx = float("inf")
return (not item.is_eager, idx)
return sorted(declaration_order, key=sort_key)
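# A small worked example (parameter names illustrative): with
# declaration_order = [opt_a, eager_opt_b, arg_c] and invocation_order =
# [arg_c, opt_a], this returns [eager_opt_b, arg_c, opt_a]: eager parameters
# are processed first, the rest follow the order they appeared on the command
# line, and parameters that never appeared (index infinity) sort to the back
# of their group.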
class ParameterSource:
"""This is an enum that indicates the source of a command line parameter.
The enum has one of the following values: COMMANDLINE,
ENVIRONMENT, DEFAULT, DEFAULT_MAP. The DEFAULT indicates that the
default value in the decorator was used. This class should be
converted to an enum when Python 2 support is dropped.
"""
COMMANDLINE = "COMMANDLINE"
ENVIRONMENT = "ENVIRONMENT"
DEFAULT = "DEFAULT"
DEFAULT_MAP = "DEFAULT_MAP"
VALUES = {COMMANDLINE, ENVIRONMENT, DEFAULT, DEFAULT_MAP}
@classmethod
def validate(cls, value):
"""Validate that the specified value is a valid enum.
This method will raise a ValueError if the value is
not a valid enum.
:param value: the string value to verify
"""
if value not in cls.VALUES:
raise ValueError(
f"Invalid ParameterSource value: {value!r}. Valid"
f" values are: {','.join(cls.VALUES)}"
)
class Context:
"""The context is a special internal object that holds state relevant
for the script execution at every single level. It's normally invisible
to commands unless they opt-in to getting access to it.
The context is useful as it can pass internal objects around and can
control special execution features such as reading data from
environment variables.
A context can be used as context manager in which case it will call
:meth:`close` on teardown.
.. versionadded:: 2.0
Added the `resilient_parsing`, `help_option_names`,
`token_normalize_func` parameters.
.. versionadded:: 3.0
Added the `allow_extra_args` and `allow_interspersed_args`
parameters.
.. versionadded:: 4.0
Added the `color`, `ignore_unknown_options`, and
`max_content_width` parameters.
.. versionadded:: 7.1
Added the `show_default` parameter.
:param command: the command class for this context.
:param parent: the parent context.
:param info_name: the info name for this invocation. Generally this
is the most descriptive name for the script or
command. For the toplevel script it is usually
the name of the script, for commands below it it's
                      the name of the command.
:param obj: an arbitrary object of user data.
:param auto_envvar_prefix: the prefix to use for automatic environment
variables. If this is `None` then reading
from environment variables is disabled. This
does not affect manually set environment
variables which are always read.
:param default_map: a dictionary (like object) with default values
for parameters.
:param terminal_width: the width of the terminal. The default is
inherit from parent context. If no context
defines the terminal width then auto
detection will be applied.
:param max_content_width: the maximum width for content rendered by
Click (this currently only affects help
pages). This defaults to 80 characters if
not overridden. In other words: even if the
terminal is larger than that, Click will not
format things wider than 80 characters by
default. In addition to that, formatters might
add some safety mapping on the right.
:param resilient_parsing: if this flag is enabled then Click will
parse without any interactivity or callback
invocation. Default values will also be
ignored. This is useful for implementing
things such as completion support.
:param allow_extra_args: if this is set to `True` then extra arguments
at the end will not raise an error and will be
kept on the context. The default is to inherit
from the command.
:param allow_interspersed_args: if this is set to `False` then options
and arguments cannot be mixed. The
default is to inherit from the command.
:param ignore_unknown_options: instructs click to ignore options it does
not know and keeps them for later
processing.
:param help_option_names: optionally a list of strings that define how
the default help parameter is named. The
default is ``['--help']``.
:param token_normalize_func: an optional function that is used to
normalize tokens (options, choices,
etc.). This for instance can be used to
implement case insensitive behavior.
:param color: controls if the terminal supports ANSI colors or not. The
default is autodetection. This is only needed if ANSI
codes are used in texts that Click prints which is by
default not the case. This for instance would affect
help output.
:param show_default: if True, shows defaults for all options.
Even if an option is later created with show_default=False,
this command-level setting overrides it.
"""
def __init__(
self,
command,
parent=None,
info_name=None,
obj=None,
auto_envvar_prefix=None,
default_map=None,
terminal_width=None,
max_content_width=None,
resilient_parsing=False,
allow_extra_args=None,
allow_interspersed_args=None,
ignore_unknown_options=None,
help_option_names=None,
token_normalize_func=None,
color=None,
show_default=None,
):
#: the parent context or `None` if none exists.
self.parent = parent
#: the :class:`Command` for this context.
self.command = command
#: the descriptive information name
self.info_name = info_name
#: the parsed parameters except if the value is hidden in which
#: case it's not remembered.
self.params = {}
#: the leftover arguments.
self.args = []
#: protected arguments. These are arguments that are prepended
#: to `args` when certain parsing scenarios are encountered but
        #: must never be propagated to other arguments. This is used
#: to implement nested parsing.
self.protected_args = []
if obj is None and parent is not None:
obj = parent.obj
#: the user object stored.
self.obj = obj
self._meta = getattr(parent, "meta", {})
#: A dictionary (-like object) with defaults for parameters.
if (
default_map is None
and parent is not None
and parent.default_map is not None
):
default_map = parent.default_map.get(info_name)
self.default_map = default_map
#: This flag indicates if a subcommand is going to be executed. A
#: group callback can use this information to figure out if it's
#: being executed directly or because the execution flow passes
#: onwards to a subcommand. By default it's None, but it can be
#: the name of the subcommand to execute.
#:
#: If chaining is enabled this will be set to ``'*'`` in case
#: any commands are executed. It is however not possible to
#: figure out which ones. If you require this knowledge you
#: should use a :func:`resultcallback`.
self.invoked_subcommand = None
if terminal_width is None and parent is not None:
terminal_width = parent.terminal_width
#: The width of the terminal (None is autodetection).
self.terminal_width = terminal_width
if max_content_width is None and parent is not None:
max_content_width = parent.max_content_width
#: The maximum width of formatted content (None implies a sensible
#: default which is 80 for most things).
self.max_content_width = max_content_width
if allow_extra_args is None:
allow_extra_args = command.allow_extra_args
#: Indicates if the context allows extra args or if it should
#: fail on parsing.
#:
#: .. versionadded:: 3.0
self.allow_extra_args = allow_extra_args
if allow_interspersed_args is None:
allow_interspersed_args = command.allow_interspersed_args
#: Indicates if the context allows mixing of arguments and
#: options or not.
#:
#: .. versionadded:: 3.0
self.allow_interspersed_args = allow_interspersed_args
if ignore_unknown_options is None:
ignore_unknown_options = command.ignore_unknown_options
#: Instructs click to ignore options that a command does not
#: understand and will store it on the context for later
#: processing. This is primarily useful for situations where you
#: want to call into external programs. Generally this pattern is
        #: strongly discouraged because it's not possible to losslessly
#: forward all arguments.
#:
#: .. versionadded:: 4.0
self.ignore_unknown_options = ignore_unknown_options
if help_option_names is None:
if parent is not None:
help_option_names = parent.help_option_names
else:
help_option_names = ["--help"]
#: The names for the help options.
self.help_option_names = help_option_names
if token_normalize_func is None and parent is not None:
token_normalize_func = parent.token_normalize_func
#: An optional normalization function for tokens. This is
#: options, choices, commands etc.
self.token_normalize_func = token_normalize_func
#: Indicates if resilient parsing is enabled. In that case Click
#: will do its best to not cause any failures and default values
#: will be ignored. Useful for completion.
self.resilient_parsing = resilient_parsing
# If there is no envvar prefix yet, but the parent has one and
# the command on this level has a name, we can expand the envvar
# prefix automatically.
if auto_envvar_prefix is None:
if (
parent is not None
and parent.auto_envvar_prefix is not None
and self.info_name is not None
):
auto_envvar_prefix = (
f"{parent.auto_envvar_prefix}_{self.info_name.upper()}"
)
else:
auto_envvar_prefix = auto_envvar_prefix.upper()
if auto_envvar_prefix is not None:
auto_envvar_prefix = auto_envvar_prefix.replace("-", "_")
self.auto_envvar_prefix = auto_envvar_prefix
if color is None and parent is not None:
color = parent.color
#: Controls if styling output is wanted or not.
self.color = color
self.show_default = show_default
self._close_callbacks = []
self._depth = 0
self._source_by_paramname = {}
def __enter__(self):
self._depth += 1
push_context(self)
return self
def __exit__(self, exc_type, exc_value, tb):
self._depth -= 1
if self._depth == 0:
self.close()
pop_context()
@contextmanager
def scope(self, cleanup=True):
"""This helper method can be used with the context object to promote
it to the current thread local (see :func:`get_current_context`).
The default behavior of this is to invoke the cleanup functions which
can be disabled by setting `cleanup` to `False`. The cleanup
functions are typically used for things such as closing file handles.
If the cleanup is intended the context object can also be directly
used as a context manager.
Example usage::
with ctx.scope():
assert get_current_context() is ctx
This is equivalent::
with ctx:
assert get_current_context() is ctx
.. versionadded:: 5.0
:param cleanup: controls if the cleanup functions should be run or
not. The default is to run these functions. In
some situations the context only wants to be
temporarily pushed in which case this can be disabled.
Nested pushes automatically defer the cleanup.
"""
if not cleanup:
self._depth += 1
try:
with self as rv:
yield rv
finally:
if not cleanup:
self._depth -= 1
@property
def meta(self):
"""This is a dictionary which is shared with all the contexts
that are nested. It exists so that click utilities can store some
state here if they need to. It is however the responsibility of
that code to manage this dictionary well.
The keys are supposed to be unique dotted strings. For instance
module paths are a good choice for it. What is stored in there is
irrelevant for the operation of click. However what is important is
that code that places data here adheres to the general semantics of
the system.
Example usage::
LANG_KEY = f'{__name__}.lang'
def set_language(value):
ctx = get_current_context()
ctx.meta[LANG_KEY] = value
def get_language():
return get_current_context().meta.get(LANG_KEY, 'en_US')
.. versionadded:: 5.0
"""
return self._meta
def make_formatter(self):
"""Creates the formatter for the help and usage output."""
return HelpFormatter(
width=self.terminal_width, max_width=self.max_content_width
)
def call_on_close(self, f):
"""This decorator remembers a function as callback that should be
executed when the context tears down. This is most useful to bind
resource handling to the script execution. For instance, file objects
opened by the :class:`File` type will register their close callbacks
here.
:param f: the function to execute on teardown.
"""
self._close_callbacks.append(f)
return f
def close(self):
"""Invokes all close callbacks."""
for cb in self._close_callbacks:
cb()
self._close_callbacks = []
@property
def command_path(self):
"""The computed command path. This is used for the ``usage``
information on the help page. It's automatically created by
combining the info names of the chain of contexts to the root.
"""
rv = ""
if self.info_name is not None:
rv = self.info_name
if self.parent is not None:
rv = f"{self.parent.command_path} {rv}"
return rv.lstrip()
def find_root(self):
"""Finds the outermost context."""
node = self
while node.parent is not None:
node = node.parent
return node
def find_object(self, object_type):
"""Finds the closest object of a given type."""
node = self
while node is not None:
if isinstance(node.obj, object_type):
return node.obj
node = node.parent
def ensure_object(self, object_type):
"""Like :meth:`find_object` but sets the innermost object to a
new instance of `object_type` if it does not exist.
"""
rv = self.find_object(object_type)
if rv is None:
self.obj = rv = object_type()
return rv
def lookup_default(self, name):
"""Looks up the default for a parameter name. This by default
looks into the :attr:`default_map` if available.
"""
if self.default_map is not None:
rv = self.default_map.get(name)
if callable(rv):
rv = rv()
return rv
def fail(self, message):
"""Aborts the execution of the program with a specific error
message.
:param message: the error message to fail with.
"""
raise UsageError(message, self)
def abort(self):
"""Aborts the script."""
raise Abort()
def exit(self, code=0):
"""Exits the application with a given exit code."""
raise Exit(code)
def get_usage(self):
"""Helper method to get formatted usage string for the current
context and command.
"""
return self.command.get_usage(self)
def get_help(self):
"""Helper method to get formatted help page for the current
context and command.
"""
return self.command.get_help(self)
def invoke(*args, **kwargs): # noqa: B902
"""Invokes a command callback in exactly the way it expects. There
are two ways to invoke this method:
1. the first argument can be a callback and all other arguments and
keyword arguments are forwarded directly to the function.
2. the first argument is a click command object. In that case all
arguments are forwarded as well but proper click parameters
(options and click arguments) must be keyword arguments and Click
will fill in defaults.
Note that before Click 3.2 keyword arguments were not properly filled
in against the intention of this code and no context was created. For
more information about this change and why it was done in a bugfix
release see :ref:`upgrade-to-3.2`.
"""
self, callback = args[:2]
ctx = self
# It's also possible to invoke another command which might or
# might not have a callback. In that case we also fill
# in defaults and make a new context for this command.
if isinstance(callback, Command):
other_cmd = callback
callback = other_cmd.callback
ctx = Context(other_cmd, info_name=other_cmd.name, parent=self)
if callback is None:
raise TypeError(
"The given command does not have a callback that can be invoked."
)
for param in other_cmd.params:
if param.name not in kwargs and param.expose_value:
kwargs[param.name] = param.get_default(ctx)
args = args[2:]
with augment_usage_errors(self):
with ctx:
return callback(*args, **kwargs)
def forward(*args, **kwargs): # noqa: B902
"""Similar to :meth:`invoke` but fills in default keyword
arguments from the current context if the other command expects
it. This cannot invoke callbacks directly, only other commands.
"""
self, cmd = args[:2]
# It's also possible to invoke another command which might or
# might not have a callback.
if not isinstance(cmd, Command):
raise TypeError("Callback is not a command.")
for param in self.params:
if param not in kwargs:
kwargs[param] = self.params[param]
return self.invoke(cmd, **kwargs)
def set_parameter_source(self, name, source):
"""Set the source of a parameter.
This indicates the location from which the value of the
parameter was obtained.
:param name: the name of the command line parameter
:param source: the source of the command line parameter, which
should be a valid ParameterSource value
"""
ParameterSource.validate(source)
self._source_by_paramname[name] = source
def get_parameter_source(self, name):
"""Get the source of a parameter.
This indicates the location from which the value of the
parameter was obtained. This can be useful for determining
when a user specified an option on the command line that is
the same as the default. In that case, the source would be
ParameterSource.COMMANDLINE, even though the value of the
parameter was equivalent to the default.
:param name: the name of the command line parameter
:returns: the source
:rtype: ParameterSource
"""
return self._source_by_paramname[name]
class BaseCommand:
"""The base command implements the minimal API contract of commands.
Most code will never use this as it does not implement a lot of useful
functionality but it can act as the direct subclass of alternative
parsing methods that do not depend on the Click parser.
For instance, this can be used to bridge Click and other systems like
argparse or docopt.
Because base commands do not implement a lot of the API that other
parts of Click take for granted, they are not supported for all
operations. For instance, they cannot be used with the decorators
usually and they have no built-in callback system.
.. versionchanged:: 2.0
Added the `context_settings` parameter.
:param name: the name of the command to use unless a group overrides it.
:param context_settings: an optional dictionary with defaults that are
passed to the context object.
"""
#: the default for the :attr:`Context.allow_extra_args` flag.
allow_extra_args = False
#: the default for the :attr:`Context.allow_interspersed_args` flag.
allow_interspersed_args = True
#: the default for the :attr:`Context.ignore_unknown_options` flag.
ignore_unknown_options = False
def __init__(self, name, context_settings=None):
#: the name the command thinks it has. Upon registering a command
#: on a :class:`Group` the group will default the command name
#: with this information. You should instead use the
#: :class:`Context`\'s :attr:`~Context.info_name` attribute.
self.name = name
if context_settings is None:
context_settings = {}
#: an optional dictionary with defaults passed to the context.
self.context_settings = context_settings
def __repr__(self):
return f"<{self.__class__.__name__} {self.name}>"
def get_usage(self, ctx):
raise NotImplementedError("Base commands cannot get usage")
def get_help(self, ctx):
raise NotImplementedError("Base commands cannot get help")
def make_context(self, info_name, args, parent=None, **extra):
"""This function when given an info name and arguments will kick
off the parsing and create a new :class:`Context`. It does not
invoke the actual command callback though.
        :param info_name: the info name for this invocation. Generally this
is the most descriptive name for the script or
command. For the toplevel script it's usually
the name of the script, for commands below it it's
                          the name of the command.
:param args: the arguments to parse as list of strings.
:param parent: the parent context if available.
:param extra: extra keyword arguments forwarded to the context
constructor.
"""
for key, value in self.context_settings.items():
if key not in extra:
extra[key] = value
ctx = Context(self, info_name=info_name, parent=parent, **extra)
with ctx.scope(cleanup=False):
self.parse_args(ctx, args)
return ctx
def parse_args(self, ctx, args):
"""Given a context and a list of arguments this creates the parser
and parses the arguments, then modifies the context as necessary.
This is automatically invoked by :meth:`make_context`.
"""
raise NotImplementedError("Base commands do not know how to parse arguments.")
def invoke(self, ctx):
"""Given a context, this invokes the command. The default
implementation is raising a not implemented error.
"""
raise NotImplementedError("Base commands are not invokable by default")
def main(
self,
args=None,
prog_name=None,
complete_var=None,
standalone_mode=True,
**extra,
):
"""This is the way to invoke a script with all the bells and
whistles as a command line application. This will always terminate
the application after a call. If this is not wanted, ``SystemExit``
needs to be caught.
This method is also available by directly calling the instance of
a :class:`Command`.
.. versionadded:: 3.0
Added the `standalone_mode` flag to control the standalone mode.
:param args: the arguments that should be used for parsing. If not
provided, ``sys.argv[1:]`` is used.
:param prog_name: the program name that should be used. By default
the program name is constructed by taking the file
name from ``sys.argv[0]``.
:param complete_var: the environment variable that controls the
bash completion support. The default is
``"_<prog_name>_COMPLETE"`` with prog_name in
uppercase.
:param standalone_mode: the default behavior is to invoke the script
in standalone mode. Click will then
handle exceptions and convert them into
error messages and the function will never
return but shut down the interpreter. If
this is set to `False` they will be
propagated to the caller and the return
value of this function is the return value
of :meth:`invoke`.
:param extra: extra keyword arguments are forwarded to the context
constructor. See :class:`Context` for more information.
"""
# Verify that the environment is configured correctly, or reject
# further execution to avoid a broken script.
_verify_python_env()
if args is None:
args = sys.argv[1:]
else:
args = list(args)
if prog_name is None:
prog_name = make_str(
os.path.basename(sys.argv[0] if sys.argv else __file__)
)
# Hook for the Bash completion. This only activates if the Bash
# completion is actually enabled, otherwise this is quite a fast
# noop.
_bashcomplete(self, prog_name, complete_var)
try:
try:
with self.make_context(prog_name, args, **extra) as ctx:
rv = self.invoke(ctx)
if not standalone_mode:
return rv
# it's not safe to `ctx.exit(rv)` here!
# note that `rv` may actually contain data like "1" which
# has obvious effects
# more subtle case: `rv=[None, None]` can come out of
# chained commands which all returned `None` -- so it's not
# even always obvious that `rv` indicates success/failure
# by its truthiness/falsiness
ctx.exit()
except (EOFError, KeyboardInterrupt):
echo(file=sys.stderr)
raise Abort()
except ClickException as e:
if not standalone_mode:
raise
e.show()
sys.exit(e.exit_code)
except OSError as e:
if e.errno == errno.EPIPE:
sys.stdout = PacifyFlushWrapper(sys.stdout)
sys.stderr = PacifyFlushWrapper(sys.stderr)
sys.exit(1)
else:
raise
except Exit as e:
if standalone_mode:
sys.exit(e.exit_code)
else:
# in non-standalone mode, return the exit code
# note that this is only reached if `self.invoke` above raises
# an Exit explicitly -- thus bypassing the check there which
# would return its result
# the results of non-standalone execution may therefore be
# somewhat ambiguous: if there are codepaths which lead to
# `ctx.exit(1)` and to `return 1`, the caller won't be able to
# tell the difference between the two
return e.exit_code
except Abort:
if not standalone_mode:
raise
echo("Aborted!", file=sys.stderr)
sys.exit(1)
def __call__(self, *args, **kwargs):
"""Alias for :meth:`main`."""
return self.main(*args, **kwargs)
class Command(BaseCommand):
"""Commands are the basic building block of command line interfaces in
Click. A basic command handles command line parsing and might dispatch
more parsing to commands nested below it.
.. versionchanged:: 2.0
Added the `context_settings` parameter.
.. versionchanged:: 8.0
Added repr showing the command name
.. versionchanged:: 7.1
Added the `no_args_is_help` parameter.
:param name: the name of the command to use unless a group overrides it.
:param context_settings: an optional dictionary with defaults that are
passed to the context object.
:param callback: the callback to invoke. This is optional.
:param params: the parameters to register with this command. This can
be either :class:`Option` or :class:`Argument` objects.
:param help: the help string to use for this command.
:param epilog: like the help string but it's printed at the end of the
help page after everything else.
:param short_help: the short help to use for this command. This is
shown on the command listing of the parent command.
:param add_help_option: by default each command registers a ``--help``
option. This can be disabled by this parameter.
:param no_args_is_help: this controls what happens if no arguments are
provided. This option is disabled by default.
If enabled this will add ``--help`` as argument
if no arguments are passed
:param hidden: hide this command from help outputs.
:param deprecated: issues a message indicating that
the command is deprecated.
"""
def __init__(
self,
name,
context_settings=None,
callback=None,
params=None,
help=None,
epilog=None,
short_help=None,
options_metavar="[OPTIONS]",
add_help_option=True,
no_args_is_help=False,
hidden=False,
deprecated=False,
):
BaseCommand.__init__(self, name, context_settings)
#: the callback to execute when the command fires. This might be
#: `None` in which case nothing happens.
self.callback = callback
#: the list of parameters for this command in the order they
#: should show up in the help page and execute. Eager parameters
#: will automatically be handled before non eager ones.
self.params = params or []
# if a form feed (page break) is found in the help text, truncate help
# text to the content preceding the first form feed
if help and "\f" in help:
help = help.split("\f", 1)[0]
self.help = help
self.epilog = epilog
self.options_metavar = options_metavar
self.short_help = short_help
self.add_help_option = add_help_option
self.no_args_is_help = no_args_is_help
self.hidden = hidden
self.deprecated = deprecated
def __repr__(self):
return f"<{self.__class__.__name__} {self.name}>"
def get_usage(self, ctx):
"""Formats the usage line into a string and returns it.
Calls :meth:`format_usage` internally.
"""
formatter = ctx.make_formatter()
self.format_usage(ctx, formatter)
return formatter.getvalue().rstrip("\n")
def get_params(self, ctx):
rv = self.params
help_option = self.get_help_option(ctx)
if help_option is not None:
rv = rv + [help_option]
return rv
def format_usage(self, ctx, formatter):
"""Writes the usage line into the formatter.
This is a low-level method called by :meth:`get_usage`.
"""
pieces = self.collect_usage_pieces(ctx)
formatter.write_usage(ctx.command_path, " ".join(pieces))
def collect_usage_pieces(self, ctx):
"""Returns all the pieces that go into the usage line and returns
it as a list of strings.
"""
rv = [self.options_metavar]
for param in self.get_params(ctx):
rv.extend(param.get_usage_pieces(ctx))
return rv
def get_help_option_names(self, ctx):
"""Returns the names for the help option."""
all_names = set(ctx.help_option_names)
for param in self.params:
all_names.difference_update(param.opts)
all_names.difference_update(param.secondary_opts)
return all_names
def get_help_option(self, ctx):
"""Returns the help option object."""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help="Show this message and exit.",
)
def make_parser(self, ctx):
"""Creates the underlying option parser for this command."""
parser = OptionParser(ctx)
for param in self.get_params(ctx):
param.add_to_parser(parser, ctx)
return parser
def get_help(self, ctx):
"""Formats the help into a string and returns it.
Calls :meth:`format_help` internally.
"""
formatter = ctx.make_formatter()
self.format_help(ctx, formatter)
return formatter.getvalue().rstrip("\n")
def get_short_help_str(self, limit=45):
"""Gets short help for the command or makes it by shortening the
long help string.
"""
return (
self.short_help
or self.help
and make_default_short_help(self.help, limit)
or ""
)
def format_help(self, ctx, formatter):
"""Writes the help into the formatter if it exists.
This is a low-level method called by :meth:`get_help`.
This calls the following methods:
- :meth:`format_usage`
- :meth:`format_help_text`
- :meth:`format_options`
- :meth:`format_epilog`
"""
self.format_usage(ctx, formatter)
self.format_help_text(ctx, formatter)
self.format_options(ctx, formatter)
self.format_epilog(ctx, formatter)
def format_help_text(self, ctx, formatter):
"""Writes the help text to the formatter if it exists."""
if self.help:
formatter.write_paragraph()
with formatter.indentation():
help_text = self.help
if self.deprecated:
help_text += DEPRECATED_HELP_NOTICE
formatter.write_text(help_text)
elif self.deprecated:
formatter.write_paragraph()
with formatter.indentation():
formatter.write_text(DEPRECATED_HELP_NOTICE)
def format_options(self, ctx, formatter):
"""Writes all the options into the formatter if they exist."""
opts = []
for param in self.get_params(ctx):
rv = param.get_help_record(ctx)
if rv is not None:
opts.append(rv)
if opts:
with formatter.section("Options"):
formatter.write_dl(opts)
def format_epilog(self, ctx, formatter):
"""Writes the epilog into the formatter if it exists."""
if self.epilog:
formatter.write_paragraph()
with formatter.indentation():
formatter.write_text(self.epilog)
def parse_args(self, ctx, args):
if not args and self.no_args_is_help and not ctx.resilient_parsing:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
parser = self.make_parser(ctx)
opts, args, param_order = parser.parse_args(args=args)
for param in iter_params_for_processing(param_order, self.get_params(ctx)):
value, args = param.handle_parse_result(ctx, opts, args)
if args and not ctx.allow_extra_args and not ctx.resilient_parsing:
ctx.fail(
"Got unexpected extra"
f" argument{'s' if len(args) != 1 else ''}"
f" ({' '.join(map(make_str, args))})"
)
ctx.args = args
return args
def invoke(self, ctx):
"""Given a context, this invokes the attached callback (if it exists)
in the right way.
"""
_maybe_show_deprecated_notice(self)
if self.callback is not None:
return ctx.invoke(self.callback, **ctx.params)
class MultiCommand(Command):
"""A multi command is the basic implementation of a command that
dispatches to subcommands. The most common version is the
:class:`Group`.
:param invoke_without_command: this controls how the multi command itself
is invoked. By default it's only invoked
if a subcommand is provided.
:param no_args_is_help: this controls what happens if no arguments are
provided. This option is enabled by default if
`invoke_without_command` is disabled or disabled
if it's enabled. If enabled this will add
``--help`` as argument if no arguments are
passed.
:param subcommand_metavar: the string that is used in the documentation
to indicate the subcommand place.
:param chain: if this is set to `True` chaining of multiple subcommands
is enabled. This restricts the form of commands in that
they cannot have optional arguments but it allows
multiple commands to be chained together.
:param result_callback: the result callback to attach to this multi
command.
"""
allow_extra_args = True
allow_interspersed_args = False
def __init__(
self,
name=None,
invoke_without_command=False,
no_args_is_help=None,
subcommand_metavar=None,
chain=False,
result_callback=None,
**attrs,
):
Command.__init__(self, name, **attrs)
if no_args_is_help is None:
no_args_is_help = not invoke_without_command
self.no_args_is_help = no_args_is_help
self.invoke_without_command = invoke_without_command
if subcommand_metavar is None:
if chain:
subcommand_metavar = SUBCOMMANDS_METAVAR
else:
subcommand_metavar = SUBCOMMAND_METAVAR
self.subcommand_metavar = subcommand_metavar
self.chain = chain
#: The result callback that is stored. This can be set or
#: overridden with the :func:`resultcallback` decorator.
self.result_callback = result_callback
if self.chain:
for param in self.params:
if isinstance(param, Argument) and not param.required:
raise RuntimeError(
"Multi commands in chain mode cannot have"
" optional arguments."
)
def collect_usage_pieces(self, ctx):
rv = Command.collect_usage_pieces(self, ctx)
rv.append(self.subcommand_metavar)
return rv
def format_options(self, ctx, formatter):
Command.format_options(self, ctx, formatter)
self.format_commands(ctx, formatter)
def resultcallback(self, replace=False):
"""Adds a result callback to the chain command. By default if a
result callback is already registered this will chain them but
this can be disabled with the `replace` parameter. The result
callback is invoked with the return value of the subcommand
(or the list of return values from all subcommands if chaining
is enabled) as well as the parameters as they would be passed
to the main callback.
Example::
@click.group()
@click.option('-i', '--input', default=23)
def cli(input):
return 42
@cli.resultcallback()
def process_result(result, input):
return result + input
.. versionadded:: 3.0
:param replace: if set to `True` an already existing result
callback will be removed.
"""
def decorator(f):
old_callback = self.result_callback
if old_callback is None or replace:
self.result_callback = f
return f
def function(__value, *args, **kwargs):
return f(old_callback(__value, *args, **kwargs), *args, **kwargs)
self.result_callback = rv = update_wrapper(function, f)
return rv
return decorator
def format_commands(self, ctx, formatter):
"""Extra format methods for multi methods that adds all the commands
after the options.
"""
commands = []
for subcommand in self.list_commands(ctx):
cmd = self.get_command(ctx, subcommand)
# What is this, the tool lied about a command. Ignore it
if cmd is None:
continue
if cmd.hidden:
continue
commands.append((subcommand, cmd))
# allow for 3 times the default spacing
if len(commands):
limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands)
rows = []
for subcommand, cmd in commands:
help = cmd.get_short_help_str(limit)
rows.append((subcommand, help))
if rows:
with formatter.section("Commands"):
formatter.write_dl(rows)
def parse_args(self, ctx, args):
if not args and self.no_args_is_help and not ctx.resilient_parsing:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
rest = Command.parse_args(self, ctx, args)
if self.chain:
ctx.protected_args = rest
ctx.args = []
elif rest:
ctx.protected_args, ctx.args = rest[:1], rest[1:]
return ctx.args
def invoke(self, ctx):
def _process_result(value):
if self.result_callback is not None:
value = ctx.invoke(self.result_callback, value, **ctx.params)
return value
if not ctx.protected_args:
# If we are invoked without command the chain flag controls
# how this happens. If we are not in chain mode, the return
# value here is the return value of the command.
# If however we are in chain mode, the return value is the
# return value of the result processor invoked with an empty
# list (which means that no subcommand actually was executed).
if self.invoke_without_command:
if not self.chain:
return Command.invoke(self, ctx)
with ctx:
Command.invoke(self, ctx)
return _process_result([])
ctx.fail("Missing command.")
# Fetch args back out
args = ctx.protected_args + ctx.args
ctx.args = []
ctx.protected_args = []
# If we're not in chain mode, we only allow the invocation of a
# single command but we also inform the current context about the
# name of the command to invoke.
if not self.chain:
# Make sure the context is entered so we do not clean up
# resources until the result processor has worked.
with ctx:
cmd_name, cmd, args = self.resolve_command(ctx, args)
ctx.invoked_subcommand = cmd_name
Command.invoke(self, ctx)
sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
with sub_ctx:
return _process_result(sub_ctx.command.invoke(sub_ctx))
# In chain mode we create the contexts step by step, but after the
# base command has been invoked. Because at that point we do not
# know the subcommands yet, the invoked subcommand attribute is
# set to ``*`` to inform the command that subcommands are executed
# but nothing else.
with ctx:
ctx.invoked_subcommand = "*" if args else None
Command.invoke(self, ctx)
# Otherwise we make every single context and invoke them in a
# chain. In that case the return value to the result processor
# is the list of all invoked subcommand's results.
contexts = []
while args:
cmd_name, cmd, args = self.resolve_command(ctx, args)
sub_ctx = cmd.make_context(
cmd_name,
args,
parent=ctx,
allow_extra_args=True,
allow_interspersed_args=False,
)
contexts.append(sub_ctx)
args, sub_ctx.args = sub_ctx.args, []
rv = []
for sub_ctx in contexts:
with sub_ctx:
rv.append(sub_ctx.command.invoke(sub_ctx))
return _process_result(rv)
def resolve_command(self, ctx, args):
cmd_name = make_str(args[0])
original_cmd_name = cmd_name
# Get the command
cmd = self.get_command(ctx, cmd_name)
# If we can't find the command but there is a normalization
# function available, we try with that one.
if cmd is None and ctx.token_normalize_func is not None:
cmd_name = ctx.token_normalize_func(cmd_name)
cmd = self.get_command(ctx, cmd_name)
# If we don't find the command we want to show an error message
# to the user that it was not provided. However, there is
# something else we should do: if the first argument looks like
# an option we want to kick off parsing again for arguments to
# resolve things like --help which now should go to the main
# place.
if cmd is None and not ctx.resilient_parsing:
if split_opt(cmd_name)[0]:
self.parse_args(ctx, ctx.args)
ctx.fail(f"No such command '{original_cmd_name}'.")
return cmd_name, cmd, args[1:]
def get_command(self, ctx, cmd_name):
"""Given a context and a command name, this returns a
:class:`Command` object if it exists or returns `None`.
"""
raise NotImplementedError()
def list_commands(self, ctx):
"""Returns a list of subcommand names in the order they should
appear.
"""
return []
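# --- Editor's note: illustrative sketch, not part of the upstream module. ---
# The two hooks above (`get_command` and `list_commands`) are all a subclass
# needs to override to become a working dispatcher. The command name and
# callback below are hypothetical, and `main(..., standalone_mode=False)` is
# assumed to behave as in upstream Click 7.x (returning the callback result).
def _example_custom_multicommand():
    class LazyCLI(MultiCommand):
        def list_commands(self, ctx):
            return ["hello"]
        def get_command(self, ctx, cmd_name):
            if cmd_name == "hello":
                return Command("hello", callback=lambda: "hello!")
            return None
    cli = LazyCLI("lazy")
    # Dispatches to the subcommand resolved by get_command().
    assert cli.main(["hello"], standalone_mode=False) == "hello!"
    return cli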
class Group(MultiCommand):
"""A group allows a command to have subcommands attached. This is the
most common way to implement nesting in Click.
:param commands: a dictionary of commands.
"""
def __init__(self, name=None, commands=None, **attrs):
MultiCommand.__init__(self, name, **attrs)
#: the registered subcommands by their exported names.
self.commands = commands or {}
def add_command(self, cmd, name=None):
"""Registers another :class:`Command` with this group. If the name
is not provided, the name of the command is used.
"""
name = name or cmd.name
if name is None:
raise TypeError("Command has no name.")
_check_multicommand(self, name, cmd, register=True)
self.commands[name] = cmd
def command(self, *args, **kwargs):
"""A shortcut decorator for declaring and attaching a command to
the group. This takes the same arguments as :func:`command` but
immediately registers the created command with this instance by
calling into :meth:`add_command`.
"""
from .decorators import command
def decorator(f):
cmd = command(*args, **kwargs)(f)
self.add_command(cmd)
return cmd
return decorator
def group(self, *args, **kwargs):
"""A shortcut decorator for declaring and attaching a group to
the group. This takes the same arguments as :func:`group` but
immediately registers the created command with this instance by
calling into :meth:`add_command`.
"""
from .decorators import group
def decorator(f):
cmd = group(*args, **kwargs)(f)
self.add_command(cmd)
return cmd
return decorator
def get_command(self, ctx, cmd_name):
return self.commands.get(cmd_name)
def list_commands(self, ctx):
return sorted(self.commands)
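# --- Editor's note: illustrative sketch, not part of the upstream module. ---
# Attaching commands to a Group programmatically mirrors what the
# @group.command() decorator does under the hood; the names are invented.
def _example_group_dispatch():
    grp = Group("tools")
    grp.add_command(Command("greet", callback=lambda: "hi"))
    assert grp.list_commands(None) == ["greet"]
    assert grp.get_command(None, "greet").name == "greet"
    return grp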
class CommandCollection(MultiCommand):
"""A command collection is a multi command that merges multiple multi
commands together into one. This is a straightforward implementation
that accepts a list of different multi commands as sources and
provides all the commands for each of them.
"""
def __init__(self, name=None, sources=None, **attrs):
MultiCommand.__init__(self, name, **attrs)
#: The list of registered multi commands.
self.sources = sources or []
def add_source(self, multi_cmd):
"""Adds a new multi command to the chain dispatcher."""
self.sources.append(multi_cmd)
def get_command(self, ctx, cmd_name):
for source in self.sources:
rv = source.get_command(ctx, cmd_name)
if rv is not None:
if self.chain:
_check_multicommand(self, cmd_name, rv)
return rv
def list_commands(self, ctx):
rv = set()
for source in self.sources:
rv.update(source.list_commands(ctx))
return sorted(rv)
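# --- Editor's note: illustrative sketch, not part of the upstream module. ---
# Merging two independent groups so their commands are served from a single
# entry point; the group and command names are hypothetical.
def _example_command_collection():
    db_cli = Group("db", commands={"migrate": Command("migrate", callback=lambda: "migrated")})
    web_cli = Group("web", commands={"serve": Command("serve", callback=lambda: "serving")})
    cli = CommandCollection("cli", sources=[db_cli, web_cli])
    assert cli.list_commands(None) == ["migrate", "serve"]
    assert cli.main(["serve"], standalone_mode=False) == "serving"
    return cli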
class Parameter:
r"""A parameter to a command comes in two versions: they are either
:class:`Option`\s or :class:`Argument`\s. Other subclasses are currently
not supported by design as some of the internals for parsing are
intentionally not finalized.
Some settings are supported by both options and arguments.
:param param_decls: the parameter declarations for this option or
argument. This is a list of flags or argument
names.
:param type: the type that should be used. Either a :class:`ParamType`
or a Python type. The later is converted into the former
automatically if supported.
:param required: controls if this is optional or not.
:param default: the default value if omitted. This can also be a callable,
in which case it's invoked when the default is needed
without any arguments.
:param callback: a callback that should be executed after the parameter
was matched. This is called as ``fn(ctx, param,
value)`` and needs to return the value.
:param nargs: the number of arguments to match. If not ``1`` the return
value is a tuple instead of single value. The default for
nargs is ``1`` (except if the type is a tuple, then it's
the arity of the tuple). If ``nargs=-1``, all remaining
parameters are collected.
:param metavar: how the value is represented in the help page.
:param expose_value: if this is `True` then the value is passed onwards
to the command callback and stored on the context,
otherwise it's skipped.
:param is_eager: eager values are processed before non eager ones. This
should not be set for arguments or it will inverse the
order of processing.
:param envvar: a string or list of strings that are environment variables
that should be checked.
.. versionchanged:: 7.1
Empty environment variables are ignored rather than taking the
empty string value. This makes it possible for scripts to clear
variables if they can't unset them.
.. versionchanged:: 2.0
Changed signature for parameter callback to also be passed the
parameter. The old callback format will still work, but it will
raise a warning to give you a chance to migrate the code easier.
"""
param_type_name = "parameter"
def __init__(
self,
param_decls=None,
type=None,
required=False,
default=None,
callback=None,
nargs=None,
metavar=None,
expose_value=True,
is_eager=False,
envvar=None,
autocompletion=None,
):
self.name, self.opts, self.secondary_opts = self._parse_decls(
param_decls or (), expose_value
)
self.type = convert_type(type, default)
# Default nargs to what the type tells us if we have that
# information available.
if nargs is None:
if self.type.is_composite:
nargs = self.type.arity
else:
nargs = 1
self.required = required
self.callback = callback
self.nargs = nargs
self.multiple = False
self.expose_value = expose_value
self.default = default
self.is_eager = is_eager
self.metavar = metavar
self.envvar = envvar
self.autocompletion = autocompletion
def __repr__(self):
return f"<{self.__class__.__name__} {self.name}>"
@property
def human_readable_name(self):
"""Returns the human readable name of this parameter. This is the
same as the name for options, but the metavar for arguments.
"""
return self.name
def make_metavar(self):
if self.metavar is not None:
return self.metavar
metavar = self.type.get_metavar(self)
if metavar is None:
metavar = self.type.name.upper()
if self.nargs != 1:
metavar += "..."
return metavar
def get_default(self, ctx):
"""Given a context variable this calculates the default value."""
# Otherwise go with the regular default.
if callable(self.default):
rv = self.default()
else:
rv = self.default
return self.type_cast_value(ctx, rv)
def add_to_parser(self, parser, ctx):
pass
def consume_value(self, ctx, opts):
value = opts.get(self.name)
source = ParameterSource.COMMANDLINE
if value is None:
value = self.value_from_envvar(ctx)
source = ParameterSource.ENVIRONMENT
if value is None:
value = ctx.lookup_default(self.name)
source = ParameterSource.DEFAULT_MAP
if value is not None:
ctx.set_parameter_source(self.name, source)
return value
def type_cast_value(self, ctx, value):
"""Given a value this runs it properly through the type system.
This automatically handles things like `nargs` and `multiple` as
well as composite types.
"""
if self.type.is_composite:
if self.nargs <= 1:
raise TypeError(
"Attempted to invoke composite type but nargs has"
f" been set to {self.nargs}. This is not supported;"
" nargs needs to be set to a fixed value > 1."
)
if self.multiple:
return tuple(self.type(x or (), self, ctx) for x in value or ())
return self.type(value or (), self, ctx)
def _convert(value, level):
if level == 0:
return self.type(value, self, ctx)
return tuple(_convert(x, level - 1) for x in value or ())
return _convert(value, (self.nargs != 1) + bool(self.multiple))
def process_value(self, ctx, value):
"""Given a value and context this runs the logic to convert the
value as necessary.
"""
# If the value we were given is None we do nothing. This way
# code that calls this can easily figure out if something was
# not provided. Otherwise it would be converted into an empty
# tuple for multiple invocations which is inconvenient.
if value is not None:
return self.type_cast_value(ctx, value)
def value_is_missing(self, value):
if value is None:
return True
if (self.nargs != 1 or self.multiple) and value == ():
return True
return False
def full_process_value(self, ctx, value):
value = self.process_value(ctx, value)
if value is None and not ctx.resilient_parsing:
value = self.get_default(ctx)
if value is not None:
ctx.set_parameter_source(self.name, ParameterSource.DEFAULT)
if self.required and self.value_is_missing(value):
raise MissingParameter(ctx=ctx, param=self)
return value
def resolve_envvar_value(self, ctx):
if self.envvar is None:
return
if isinstance(self.envvar, (tuple, list)):
for envvar in self.envvar:
rv = os.environ.get(envvar)
if rv is not None:
return rv
else:
rv = os.environ.get(self.envvar)
if rv != "":
return rv
def value_from_envvar(self, ctx):
rv = self.resolve_envvar_value(ctx)
if rv is not None and self.nargs != 1:
rv = self.type.split_envvar_value(rv)
return rv
def handle_parse_result(self, ctx, opts, args):
with augment_usage_errors(ctx, param=self):
value = self.consume_value(ctx, opts)
try:
value = self.full_process_value(ctx, value)
except Exception:
if not ctx.resilient_parsing:
raise
value = None
if self.callback is not None:
try:
value = self.callback(ctx, self, value)
except Exception:
if not ctx.resilient_parsing:
raise
if self.expose_value:
ctx.params[self.name] = value
return value, args
def get_help_record(self, ctx):
pass
def get_usage_pieces(self, ctx):
return []
def get_error_hint(self, ctx):
"""Get a stringified version of the param for use in error messages to
indicate which param caused the error.
"""
hint_list = self.opts or [self.human_readable_name]
return " / ".join(repr(x) for x in hint_list)
class Option(Parameter):
"""Options are usually optional values on the command line and
have some extra features that arguments don't have.
All other parameters are passed onwards to the parameter constructor.
:param show_default: controls if the default value should be shown on the
help page. Normally, defaults are not shown. If this
value is a string, it shows the string instead of the
value. This is particularly useful for dynamic options.
:param show_envvar: controls if an environment variable should be shown on
the help page. Normally, environment variables
are not shown.
:param prompt: if set to `True` or a non empty string then the user will be
prompted for input. If set to `True` the prompt will be the
option name capitalized.
:param confirmation_prompt: if set then the value will need to be confirmed
if it was prompted for.
:param hide_input: if this is `True` then the input on the prompt will be
hidden from the user. This is useful for password
input.
:param is_flag: forces this option to act as a flag. The default is
auto detection.
:param flag_value: which value should be used for this flag if it's
enabled. This is set to a boolean automatically if
the option string contains a slash to mark two options.
:param multiple: if this is set to `True` then the argument is accepted
multiple times and recorded. This is similar to ``nargs``
in how it works but supports arbitrary number of
arguments.
:param count: this flag makes an option increment an integer.
:param allow_from_autoenv: if this is enabled then the value of this
parameter will be pulled from an environment
variable in case a prefix is defined on the
context.
:param help: the help string.
:param hidden: hide this option from help outputs.
"""
param_type_name = "option"
def __init__(
self,
param_decls=None,
show_default=False,
prompt=False,
confirmation_prompt=False,
hide_input=False,
is_flag=None,
flag_value=None,
multiple=False,
count=False,
allow_from_autoenv=True,
type=None,
help=None,
hidden=False,
show_choices=True,
show_envvar=False,
**attrs,
):
default_is_missing = attrs.get("default", _missing) is _missing
Parameter.__init__(self, param_decls, type=type, **attrs)
if prompt is True:
prompt_text = self.name.replace("_", " ").capitalize()
elif prompt is False:
prompt_text = None
else:
prompt_text = prompt
self.prompt = prompt_text
self.confirmation_prompt = confirmation_prompt
self.hide_input = hide_input
self.hidden = hidden
# Flags
if is_flag is None:
if flag_value is not None:
is_flag = True
else:
is_flag = bool(self.secondary_opts)
if is_flag and default_is_missing:
self.default = False
if flag_value is None:
flag_value = not self.default
self.is_flag = is_flag
self.flag_value = flag_value
if self.is_flag and isinstance(self.flag_value, bool) and type in [None, bool]:
self.type = BOOL
self.is_bool_flag = True
else:
self.is_bool_flag = False
# Counting
self.count = count
if count:
if type is None:
self.type = IntRange(min=0)
if default_is_missing:
self.default = 0
self.multiple = multiple
self.allow_from_autoenv = allow_from_autoenv
self.help = help
self.show_default = show_default
self.show_choices = show_choices
self.show_envvar = show_envvar
# Sanity check for stuff we don't support
if __debug__:
if self.nargs < 0:
raise TypeError("Options cannot have nargs < 0")
if self.prompt and self.is_flag and not self.is_bool_flag:
raise TypeError("Cannot prompt for flags that are not bools.")
if not self.is_bool_flag and self.secondary_opts:
raise TypeError("Got secondary option for non boolean flag.")
if self.is_bool_flag and self.hide_input and self.prompt is not None:
raise TypeError("Hidden input does not work with boolean flag prompts.")
if self.count:
if self.multiple:
raise TypeError(
"Options cannot be multiple and count at the same time."
)
elif self.is_flag:
raise TypeError(
"Options cannot be count and flags at the same time."
)
def _parse_decls(self, decls, expose_value):
opts = []
secondary_opts = []
name = None
possible_names = []
for decl in decls:
if decl.isidentifier():
if name is not None:
raise TypeError("Name defined twice")
name = decl
else:
split_char = ";" if decl[:1] == "/" else "/"
if split_char in decl:
first, second = decl.split(split_char, 1)
first = first.rstrip()
if first:
possible_names.append(split_opt(first))
opts.append(first)
second = second.lstrip()
if second:
secondary_opts.append(second.lstrip())
else:
possible_names.append(split_opt(decl))
opts.append(decl)
if name is None and possible_names:
possible_names.sort(key=lambda x: -len(x[0])) # group long options first
name = possible_names[0][1].replace("-", "_").lower()
if not name.isidentifier():
name = None
if name is None:
if not expose_value:
return None, opts, secondary_opts
raise TypeError("Could not determine name for option")
if not opts and not secondary_opts:
raise TypeError(
f"No options defined but a name was passed ({name})."
" Did you mean to declare an argument instead of an"
" option?"
)
return name, opts, secondary_opts
def add_to_parser(self, parser, ctx):
kwargs = {
"dest": self.name,
"nargs": self.nargs,
"obj": self,
}
if self.multiple:
action = "append"
elif self.count:
action = "count"
else:
action = "store"
if self.is_flag:
kwargs.pop("nargs", None)
action_const = f"{action}_const"
if self.is_bool_flag and self.secondary_opts:
parser.add_option(self.opts, action=action_const, const=True, **kwargs)
parser.add_option(
self.secondary_opts, action=action_const, const=False, **kwargs
)
else:
parser.add_option(
self.opts, action=action_const, const=self.flag_value, **kwargs
)
else:
kwargs["action"] = action
parser.add_option(self.opts, **kwargs)
def get_help_record(self, ctx):
if self.hidden:
return
any_prefix_is_slash = []
def _write_opts(opts):
rv, any_slashes = join_options(opts)
if any_slashes:
any_prefix_is_slash[:] = [True]
if not self.is_flag and not self.count:
rv += f" {self.make_metavar()}"
return rv
rv = [_write_opts(self.opts)]
if self.secondary_opts:
rv.append(_write_opts(self.secondary_opts))
help = self.help or ""
extra = []
if self.show_envvar:
envvar = self.envvar
if envvar is None:
if self.allow_from_autoenv and ctx.auto_envvar_prefix is not None:
envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}"
if envvar is not None:
var_str = (
", ".join(str(d) for d in envvar)
if isinstance(envvar, (list, tuple))
else envvar
)
extra.append(f"env var: {var_str}")
if self.default is not None and (self.show_default or ctx.show_default):
if isinstance(self.show_default, str):
default_string = f"({self.show_default})"
elif isinstance(self.default, (list, tuple)):
default_string = ", ".join(str(d) for d in self.default)
elif inspect.isfunction(self.default):
default_string = "(dynamic)"
else:
default_string = self.default
extra.append(f"default: {default_string}")
if self.required:
extra.append("required")
if extra:
extra_str = ";".join(extra)
help = f"{help} [{extra_str}]" if help else f"[{extra_str}]"
return ("; " if any_prefix_is_slash else " / ").join(rv), help
def get_default(self, ctx):
# If we're a non boolean flag our default is more complex because
# we need to look at all flags in the same group to figure out
        # if we're the default one in which case we return the flag
# value as default.
if self.is_flag and not self.is_bool_flag:
for param in ctx.command.params:
if param.name == self.name and param.default:
return param.flag_value
return None
return Parameter.get_default(self, ctx)
def prompt_for_value(self, ctx):
"""This is an alternative flow that can be activated in the full
value processing if a value does not exist. It will prompt the
user until a valid value exists and then returns the processed
value as result.
"""
# Calculate the default before prompting anything to be stable.
default = self.get_default(ctx)
# If this is a prompt for a flag we need to handle this
# differently.
if self.is_bool_flag:
return confirm(self.prompt, default)
return prompt(
self.prompt,
default=default,
type=self.type,
hide_input=self.hide_input,
show_choices=self.show_choices,
confirmation_prompt=self.confirmation_prompt,
value_proc=lambda x: self.process_value(ctx, x),
)
def resolve_envvar_value(self, ctx):
rv = Parameter.resolve_envvar_value(self, ctx)
if rv is not None:
return rv
if self.allow_from_autoenv and ctx.auto_envvar_prefix is not None:
envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}"
return os.environ.get(envvar)
def value_from_envvar(self, ctx):
rv = self.resolve_envvar_value(ctx)
if rv is None:
return None
value_depth = (self.nargs != 1) + bool(self.multiple)
if value_depth > 0 and rv is not None:
rv = self.type.split_envvar_value(rv)
if self.multiple and self.nargs != 1:
rv = batch(rv, self.nargs)
return rv
def full_process_value(self, ctx, value):
if value is None and self.prompt is not None and not ctx.resilient_parsing:
return self.prompt_for_value(ctx)
return Parameter.full_process_value(self, ctx, value)
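# --- Editor's note: illustrative sketch, not part of the upstream module. ---
# Demonstrates the resolution order implemented above: an explicit command-line
# value wins, then the environment variable, then the declared default. The
# option and env-var names are invented; `main(..., standalone_mode=False)` is
# assumed to return the callback result as in upstream Click 7.x.
def _example_option_precedence():
    opt = Option(["--colour"], envvar="EXAMPLE_COLOUR", default="red")
    cmd = Command("paint", params=[opt], callback=lambda colour: colour)
    assert cmd.main(["--colour", "blue"], standalone_mode=False) == "blue"
    os.environ["EXAMPLE_COLOUR"] = "green"
    try:
        assert cmd.main([], standalone_mode=False) == "green"
    finally:
        del os.environ["EXAMPLE_COLOUR"]
    assert cmd.main([], standalone_mode=False) == "red"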
class Argument(Parameter):
"""Arguments are positional parameters to a command. They generally
provide fewer features than options but can have infinite ``nargs``
and are required by default.
All parameters are passed onwards to the parameter constructor.
"""
param_type_name = "argument"
def __init__(self, param_decls, required=None, **attrs):
if required is None:
if attrs.get("default") is not None:
required = False
else:
required = attrs.get("nargs", 1) > 0
Parameter.__init__(self, param_decls, required=required, **attrs)
if self.default is not None and self.nargs < 0:
raise TypeError(
"nargs=-1 in combination with a default value is not supported."
)
@property
def human_readable_name(self):
if self.metavar is not None:
return self.metavar
return self.name.upper()
def make_metavar(self):
if self.metavar is not None:
return self.metavar
var = self.type.get_metavar(self)
if not var:
var = self.name.upper()
if not self.required:
var = f"[{var}]"
if self.nargs != 1:
var += "..."
return var
def _parse_decls(self, decls, expose_value):
if not decls:
if not expose_value:
return None, [], []
raise TypeError("Could not determine name for argument")
if len(decls) == 1:
name = arg = decls[0]
name = name.replace("-", "_").lower()
else:
raise TypeError(
"Arguments take exactly one parameter declaration, got"
f" {len(decls)}."
)
return name, [arg], []
def get_usage_pieces(self, ctx):
return [self.make_metavar()]
def get_error_hint(self, ctx):
return repr(self.make_metavar())
def add_to_parser(self, parser, ctx):
parser.add_argument(dest=self.name, nargs=self.nargs, obj=self)
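# --- Editor's note: illustrative sketch, not part of the upstream module. ---
# Arguments declared with nargs=-1 greedily collect the remaining positional
# values into a tuple and are therefore not required; names below are invented.
def _example_variadic_argument():
    arg = Argument(["paths"], nargs=-1)
    cmd = Command("collect", params=[arg], callback=lambda paths: list(paths))
    assert cmd.main(["a", "b", "c"], standalone_mode=False) == ["a", "b", "c"]
    assert cmd.main([], standalone_mode=False) == []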
| {
"content_hash": "0724a4b8356a32f679a05b29a019885b",
"timestamp": "",
"source": "github",
"line_count": 2070,
"max_line_length": 88,
"avg_line_length": 38.285990338164254,
"alnum_prop": 0.5805405541815979,
"repo_name": "rochacbruno/dynaconf",
"id": "b7124df4fb15b278db20f596f696ef74212b086b",
"size": "79252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dynaconf/vendor_src/click/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2867"
},
{
"name": "Makefile",
"bytes": "11505"
},
{
"name": "Python",
"bytes": "1438471"
},
{
"name": "Shell",
"bytes": "14740"
}
],
"symlink_target": ""
} |
"""Trains an OpenAI Baselines DQN agent on bsuite.
Note that OpenAI Gym is not installed with bsuite by default.
See also github.com/openai/baselines for more information.
"""
from absl import app
from absl import flags
from baselines import deepq
import bsuite
from bsuite import sweep
from bsuite.baselines.utils import pool
from bsuite.logging import terminal_logging
from bsuite.utils import gym_wrapper
# Internal imports.
# Experiment flags.
flags.DEFINE_string(
'bsuite_id', 'catch/0', 'BSuite identifier. '
'This global flag can be used to control which environment is loaded.')
flags.DEFINE_string('save_path', '/tmp/bsuite', 'where to save bsuite results')
flags.DEFINE_enum('logging_mode', 'csv', ['csv', 'sqlite', 'terminal'],
'which form of logging to use for bsuite results')
flags.DEFINE_boolean('overwrite', False, 'overwrite csv logging if found')
flags.DEFINE_boolean('verbose', True, 'whether to log to std output')
flags.DEFINE_integer('num_hidden_layers', 2, 'number of hidden layers')
flags.DEFINE_integer('num_units', 50, 'number of units per hidden layer')
flags.DEFINE_integer('batch_size', 32, 'size of batches sampled from replay')
flags.DEFINE_float('agent_discount', .99, 'discounting on the agent side')
flags.DEFINE_integer('replay_capacity', 100000, 'size of the replay buffer')
flags.DEFINE_integer('min_replay_size', 128, 'min replay size before training.')
flags.DEFINE_integer('sgd_period', 1, 'steps between online net updates')
flags.DEFINE_integer('target_update_period', 4,
'steps between target net updates')
flags.DEFINE_float('learning_rate', 1e-3, 'learning rate for optimizer')
flags.DEFINE_float('epsilon', 0.05, 'fraction of exploratory random actions')
flags.DEFINE_integer('seed', 42, 'seed for random number generation')
flags.DEFINE_integer('num_episodes', None, 'Number of episodes to run for.')
flags.DEFINE_integer('total_timesteps', 10_000_000,
'maximum steps if not caught by bsuite_num_episodes')
FLAGS = flags.FLAGS
def run(bsuite_id: str) -> str:
"""Runs a DQN agent on a given bsuite environment, logging to CSV."""
raw_env = bsuite.load_and_record(
bsuite_id=bsuite_id,
save_path=FLAGS.save_path,
logging_mode=FLAGS.logging_mode,
overwrite=FLAGS.overwrite,
)
if FLAGS.verbose:
raw_env = terminal_logging.wrap_environment(raw_env, log_every=True) # pytype: disable=wrong-arg-types
env = gym_wrapper.GymFromDMEnv(raw_env)
num_episodes = FLAGS.num_episodes or getattr(raw_env, 'bsuite_num_episodes')
def callback(lcl, unused_glb):
# Terminate after `num_episodes`.
try:
return lcl['num_episodes'] > num_episodes
except KeyError:
return False
# Note: we should never run for this many steps as we end after `num_episodes`
total_timesteps = FLAGS.total_timesteps
deepq.learn(
env=env,
network='mlp',
hiddens=[FLAGS.num_units] * FLAGS.num_hidden_layers,
batch_size=FLAGS.batch_size,
lr=FLAGS.learning_rate,
total_timesteps=total_timesteps,
buffer_size=FLAGS.replay_capacity,
exploration_fraction=1./total_timesteps, # i.e. immediately anneal.
exploration_final_eps=FLAGS.epsilon, # constant epsilon.
print_freq=None, # pylint: disable=wrong-arg-types
learning_starts=FLAGS.min_replay_size,
target_network_update_freq=FLAGS.target_update_period,
callback=callback, # pytype: disable=wrong-arg-types
gamma=FLAGS.agent_discount,
checkpoint_freq=None,
)
return bsuite_id
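# Editor's sketch (not part of the original script): the `callback(lcl, glb)`
# hook used above relies on the OpenAI Baselines convention of passing the
# learner's locals()/globals() each iteration, with a truthy return value
# ending training. A self-contained illustration of that contract:
def _example_stop_callback(max_episodes=3):
  def callback(lcl, unused_glb):
    return lcl.get('num_episodes', 0) > max_episodes
  assert callback({'num_episodes': 5}, None)
  assert not callback({}, None)
  return callback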
def main(argv):
# Parses whether to run a single bsuite_id, or multiprocess sweep.
del argv # Unused.
bsuite_id = FLAGS.bsuite_id
if bsuite_id in sweep.SWEEP:
print(f'Running single experiment: bsuite_id={bsuite_id}.')
run(bsuite_id)
elif hasattr(sweep, bsuite_id):
bsuite_sweep = getattr(sweep, bsuite_id)
print(f'Running sweep over bsuite_id in sweep.{bsuite_sweep}')
FLAGS.verbose = False
pool.map_mpi(run, bsuite_sweep)
else:
raise ValueError(f'Invalid flag: bsuite_id={bsuite_id}.')
if __name__ == '__main__':
app.run(main)
| {
"content_hash": "2474f9eda8211840766ff04075535a85",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 107,
"avg_line_length": 35.53846153846154,
"alnum_prop": 0.7061087061087061,
"repo_name": "deepmind/bsuite",
"id": "598801fbeb568716faa3641371943bfc1544516a",
"size": "4908",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "bsuite/baselines/third_party/openai_dqn/run.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "106470"
},
{
"name": "Python",
"bytes": "448602"
},
{
"name": "Shell",
"bytes": "2425"
},
{
"name": "TeX",
"bytes": "233184"
}
],
"symlink_target": ""
} |
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("SVC_poly" , "BinaryClass_100" , "duckdb")
| {
"content_hash": "8df3496e6911d63a65cc6ae0c658624a",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 72,
"avg_line_length": 34.75,
"alnum_prop": 0.7697841726618705,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "3a86462ed398e8e2f01c75e0ba071465caf73092",
"size": "139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/classification/BinaryClass_100/ws_BinaryClass_100_SVC_poly_duckdb_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.payload import Payload
from pants.contrib.node.targets.node_package import NodePackage
class NodeModule(NodePackage):
"""A Node module."""
def __init__(self, sources=None, address=None, payload=None, **kwargs):
"""
:param sources: Javascript and other source code files that make up this module; paths are
relative to the BUILD file's directory.
:type sources: `globs`, `rglobs` or a list of strings
"""
# TODO(John Sirois): Support devDependencies, etc. The devDependencies case is not
# clear-cut since pants controlled builds would provide devDependencies as needed to perform
# tasks. The reality is likely to be though that both pants will never cover all cases, and a
# back door to execute new tools during development will be desirable and supporting conversion
# of pre-existing package.json files as node_module targets will require this.
payload = payload or Payload()
payload.add_fields({
'sources': self.create_sources_field(sources=sources,
sources_rel_path=address.spec_path,
key_arg='sources'),
})
super(NodeModule, self).__init__(address=address, payload=payload, **kwargs)
| {
"content_hash": "c55deecdba1fed56a8cc413b471891cf",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 99,
"avg_line_length": 49.44827586206897,
"alnum_prop": 0.6687587168758717,
"repo_name": "cevaris/pants",
"id": "f8eff28667734188477592b93277d052b224fa77",
"size": "1581",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "contrib/node/src/python/pants/contrib/node/targets/node_module.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11139"
},
{
"name": "GAP",
"bytes": "4818"
},
{
"name": "Go",
"bytes": "1596"
},
{
"name": "HTML",
"bytes": "68162"
},
{
"name": "Java",
"bytes": "314216"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "7038"
},
{
"name": "Python",
"bytes": "3282583"
},
{
"name": "Scala",
"bytes": "77693"
},
{
"name": "Shell",
"bytes": "47890"
},
{
"name": "Thrift",
"bytes": "3485"
}
],
"symlink_target": ""
} |
from random import randint
import pytest
from pynq import Overlay
from pynq.lib.pmod import Pmod_DevMode
from pynq.lib.pmod import PMODA
from pynq.lib.pmod import PMODB
from pynq.lib.pmod import PMOD_SWCFG_IIC0_TOPROW
from pynq.lib.pmod import PMOD_SWCFG_IIC0_BOTROW
from pynq.lib.pmod import PMOD_SWCFG_DIOALL
from pynq.lib.pmod import PMOD_DIO_BASEADDR
from pynq.lib.pmod import PMOD_DIO_TRI_OFFSET
from pynq.lib.pmod import PMOD_DIO_DATA_OFFSET
from pynq.lib.pmod import PMOD_CFG_DIO_ALLOUTPUT
from pynq.lib.pmod import PMOD_CFG_DIO_ALLINPUT
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "[email protected]"
try:
_ = Overlay('base.bit', download=False)
flag = True
except IOError:
flag = False
@pytest.mark.skipif(not flag, reason="need base overlay to run")
def test_pmod_devmode():
"""Tests the Pmod DevMode.
The first test will instantiate DevMode objects with various switch
configurations. The returned objects should not be None.
    The second test writes a command to the mailbox and reads another command
    from the mailbox, checking whether the write and the read are successful.
"""
ol = Overlay('base.bit')
for mb_info in [PMODA, PMODB]:
assert Pmod_DevMode(mb_info, PMOD_SWCFG_IIC0_TOPROW) is not None
assert Pmod_DevMode(mb_info, PMOD_SWCFG_IIC0_BOTROW) is not None
assert Pmod_DevMode(mb_info, PMOD_SWCFG_DIOALL) is not None
ol.reset()
# Initiate the Microblaze
microblaze = Pmod_DevMode(mb_info, PMOD_SWCFG_DIOALL)
microblaze.start()
assert microblaze.status() == "RUNNING"
# Test whether writing is successful
data = 0
microblaze.write_cmd(PMOD_DIO_BASEADDR + PMOD_DIO_TRI_OFFSET,
PMOD_CFG_DIO_ALLOUTPUT)
microblaze.write_cmd(PMOD_DIO_BASEADDR + PMOD_DIO_DATA_OFFSET, data)
# Test whether reading is successful
microblaze.write_cmd(PMOD_DIO_BASEADDR + PMOD_DIO_TRI_OFFSET,
PMOD_CFG_DIO_ALLINPUT)
data = microblaze.read_cmd(PMOD_DIO_BASEADDR + PMOD_DIO_DATA_OFFSET)
assert data is not None
# Stop the Microblaze
microblaze.stop()
assert microblaze.status() == "STOPPED"
ol.reset()
del ol
| {
"content_hash": "27c5d972d69911fcf4e8ad22e1bfa0ca",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 76,
"avg_line_length": 33.114285714285714,
"alnum_prop": 0.6807592752372735,
"repo_name": "cathalmccabe/PYNQ",
"id": "74bb4b2c695fe4023f51a7eb431f7026ca01fe03",
"size": "3939",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pynq/lib/pmod/tests/test_pmod_devmode.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "BitBake",
"bytes": "1839"
},
{
"name": "C",
"bytes": "1110727"
},
{
"name": "C++",
"bytes": "74784"
},
{
"name": "CMake",
"bytes": "578"
},
{
"name": "JavaScript",
"bytes": "239958"
},
{
"name": "Jupyter Notebook",
"bytes": "17143645"
},
{
"name": "Makefile",
"bytes": "150630"
},
{
"name": "PHP",
"bytes": "2117"
},
{
"name": "Python",
"bytes": "1583136"
},
{
"name": "Shell",
"bytes": "76262"
},
{
"name": "SystemVerilog",
"bytes": "53374"
},
{
"name": "Tcl",
"bytes": "1389138"
},
{
"name": "VHDL",
"bytes": "738710"
},
{
"name": "Verilog",
"bytes": "284588"
}
],
"symlink_target": ""
} |
__author__ = 'nick.york'
__license__ = 'https://www.apache.org/licenses/LICENSE-2.0'
__copyright__ = 'Copyright (c) 2015 Virtual Instruments Corporation. All rights reserved.'
__date__ = '2015-02-22'
__version__ = '1.0'
import sys, os, optparse
import json
class Entity:
def __init__(self, name, wwn):
self.name = name
self.wwn = wwn
self.type = "fcport"
def __lt__(self, other):
return self.name < other.name
def to_JSON(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=False, indent=2)
# container for all the applications so that we can json
class Top:
def __init__(self):
self.version = 1
self.entities = []
def to_JSON(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=False, indent=2)
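# Editor's sketch (not part of the original script): the JSON payload this
# tool emits for two hypothetical WWN/nickname pairs, built with the classes
# above instead of reading them from a CSV file.
def ExampleBuildPayload():
    top = Top()
    top.entities.append(Entity("array01_spa_p0", "50:06:01:60:3c:e0:11:22"))
    top.entities.append(Entity("host42_hba1", "10:00:00:00:c9:aa:bb:cc"))
    return top.to_JSON()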
def ParseCmdLineParameters():
opts = optparse.OptionParser(description='Convert CSV to JSON Entity Import File for VirtualWisdom.')
opts.add_option("-i", "--input", action="store", type="string", dest="inputfile")
opts.add_option("-o", "--output", action="store", type="string", dest="outputfile")
opt, argv = opts.parse_args()
if opt.inputfile != None:
if not os.path.exists(opt.inputfile):
PrintHelpAndExit("Specified input file does not exist.")
return opt
def PrintHelpAndExit(errormessage=""):
if (errormessage != ""):
print("\n\n" + errormessage)
print("\n\nUsage:\n\tCSVNicknameToJSON -i <Input CSV> -o <Output JSON>\n\n\tIf input or output are not specified, stdin and stdout are used, respectively.\n\n\t\tpython3 CSVNicknameToJSON.py -i input.csv -o output.json\n\n\t\tcat input.csv | python3 CSVNickNameToJSON.py | python3 EntityImport.py -v 10.20.30.40 -u Administrator -p admin\n\n\t\tInput file should be in the format WWN,Nickname with one entry per line.\n\n")
exit()
def main():
options = ParseCmdLineParameters()
# input should either come from a text file, or from stdin
if options.inputfile != None:
fi = open(options.inputfile, 'r')
else:
fi = sys.stdin
# set up object to store the entries in so we can dump it to JSON when done
top = Top()
# iterate through the input file .. WWN,Nickname
for line in fi:
if not "," in line:
continue
# create a new object and stuff it in the top container
top.entities.append(Entity(line.split(',')[1].strip().replace("'","").replace('"',""), line.split(',')[0].strip()))
# output will go to a text file or to stdout if no file is specified
if options.outputfile != None:
fo = open(options.outputfile, 'w')
else:
fo = sys.stdout
# export the python object to JSON
with fo as outfile:
outfile.write(top.to_JSON())
if __name__ == '__main__':
main() | {
"content_hash": "3b06159521337047ebaf063e1dde274e",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 427,
"avg_line_length": 37.25,
"alnum_prop": 0.6375838926174496,
"repo_name": "signalpillar/tools",
"id": "d204e7bbf0a747ee07fab998384f82cc408e2b54",
"size": "2855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CSVNicknameToJSON.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "49810"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class ProposalConfig(AppConfig):
name = 'proposal'
| {
"content_hash": "6bdba227038e46c2cfcdf55483b78d33",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 18.2,
"alnum_prop": 0.7582417582417582,
"repo_name": "rg3915/orcamentos",
"id": "e9ae9b6c47a581cd43c29d5f5e653e62ea3d095e",
"size": "91",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orcamentos/proposal/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "50664"
},
{
"name": "HTML",
"bytes": "542962"
},
{
"name": "JavaScript",
"bytes": "133637"
},
{
"name": "Jupyter Notebook",
"bytes": "134102"
},
{
"name": "Makefile",
"bytes": "1730"
},
{
"name": "Python",
"bytes": "197204"
},
{
"name": "Shell",
"bytes": "10278"
}
],
"symlink_target": ""
} |
""" run coverage after patching """
#pylint: disable=W0611,wrong-import-order
import test.run.patch
from coverage.cmdline import main
import sys
if __name__ == '__main__':
sys.exit(main())
| {
"content_hash": "3e728f7a96d73a1c367d92d004044521",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 41,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.6923076923076923,
"repo_name": "davedoesdev/python-jwt",
"id": "6de49ef4b3a429c85057d7eebe141f81dc6fd517",
"size": "217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/run/run_coverage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12429"
},
{
"name": "HTML",
"bytes": "4284"
},
{
"name": "JavaScript",
"bytes": "21981"
},
{
"name": "Makefile",
"bytes": "922"
},
{
"name": "Python",
"bytes": "78811"
}
],
"symlink_target": ""
} |
import mock
from neutron import context
from neutron.tests.unit.plugins.ml2 import test_plugin
from networking_odl.common import constants as const
from networking_odl.ml2 import mech_driver as driver
class TestODLShim(test_plugin.Ml2PluginV2TestCase):
def setUp(self):
super(TestODLShim, self).setUp()
self.context = context.get_admin_context()
self.plugin = mock.Mock()
self.driver = driver.OpenDaylightMechanismDriver()
self.driver.odl_drv = mock.Mock()
def test_create_network_postcommit(self):
self.driver.create_network_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('create',
const.ODL_NETWORKS,
self.context)
def test_update_network_postcommit(self):
self.driver.update_network_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('update',
const.ODL_NETWORKS,
self.context)
def test_delete_network_postcommit(self):
self.driver.delete_network_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('delete',
const.ODL_NETWORKS,
self.context)
def test_create_subnet_postcommit(self):
self.driver.create_subnet_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('create',
const.ODL_SUBNETS,
self.context)
def test_update_subnet_postcommit(self):
self.driver.update_subnet_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('update',
const.ODL_SUBNETS,
self.context)
def test_delete_subnet_postcommit(self):
self.driver.delete_subnet_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('delete',
const.ODL_SUBNETS,
self.context)
def test_create_port_postcommit(self):
self.driver.create_port_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('create',
const.ODL_PORTS,
self.context)
def test_update_port_postcommit(self):
self.driver.update_port_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('update',
const.ODL_PORTS,
self.context)
def test_delete_port_postcommit(self):
self.driver.delete_port_postcommit(self.context)
self.driver.odl_drv.synchronize.assert_called_with('delete',
const.ODL_PORTS,
self.context)
def test_bind_port_delegation(self):
# given front-end with attached back-end
front_end = self.driver
front_end.odl_drv = back_end = mock.MagicMock(
spec=driver.OpenDaylightDriver)
# given PortContext to be forwarded to back-end without using
context = object()
# when binding port
front_end.bind_port(context)
# then port is bound by back-end
back_end.bind_port.assert_called_once_with(context)
| {
"content_hash": "ddf30b813b6e8adf0f82fab28bfd2279",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 78,
"avg_line_length": 46.523809523809526,
"alnum_prop": 0.5186796315250768,
"repo_name": "FedericoRessi/networking-odl",
"id": "e2ceda56c82d31e9c52cad03a7c6491411405f1d",
"size": "4553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networking_odl/tests/unit/ml2/test_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groff",
"bytes": "4119"
},
{
"name": "Mako",
"bytes": "1053"
},
{
"name": "Python",
"bytes": "290131"
},
{
"name": "Shell",
"bytes": "26159"
}
],
"symlink_target": ""
} |
from finance import db
from finance.models.account import Account
class Transaction(db.Model):
"""Transaction
Record of the transaction
"""
transaction_id = db.Column(db.Integer, primary_key=True)
account_debit_id = db.Column(db.Integer,
db.ForeignKey('account.account_id'))
account_debit = db.relationship(Account,
foreign_keys=[account_debit_id],
backref=db.backref('debits',
lazy='dynamic'))
account_credit_id = db.Column(db.Integer,
db.ForeignKey('account.account_id'))
account_credit = db.relationship(Account,
foreign_keys=[account_credit_id],
backref=db.backref('credits',
lazy='dynamic'))
amount = db.Column(db.Float(precision=2))
summary = db.Column(db.String(50))
description = db.Column(db.String(250))
date = db.Column(db.Date)
def __init__(
self,
account_debit=None,
account_credit=None,
amount=None,
summary=None,
date=None,
description=None
):
self.account_debit = account_debit
self.account_credit = account_credit
self.amount = amount
self.summary = summary
self.date = date
self.description = description
def __repr__(self):
#TODO determine account debit and then show amount in
# negative or positive
# or think of a better short description of transaction to show
return '<Transaction: {summary} {amount}>'.format(
summary=self.summary,
amount=self.amount
)
def jsonify(self):
debit = getattr(self.account_debit, 'account_id', None)
credit = getattr(self.account_credit, 'account_id', None)
res = {
'transaction_id': self.transaction_id,
'account_debit_id': debit,
'account_credit_id': credit,
'amount': self.amount,
'summary': self.summary,
'description': self.description,
'date': self.date.strftime("%Y-%m-%d")
}
if self.account_debit is not None:
res['debit'] = self.account_debit.jsonify()
if self.account_credit is not None:
res['credit'] = self.account_credit.jsonify()
# balance may be set as a running total for an account
if hasattr(self, 'balance'):
res['balance'] = self.balance
return res
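# Editor's sketch (hypothetical values; assumes the finance app's SQLAlchemy
# `db` is configured so this module imports): building a Transaction in memory
# and inspecting its JSON form without touching a database session.
def _example_transaction_jsonify():
    import datetime
    trx = Transaction(
        amount=1200.00,
        summary='Rent',
        description='Monthly rent payment',
        date=datetime.date(2015, 1, 1),
    )
    data = trx.jsonify()
    assert data['summary'] == 'Rent'
    assert data['date'] == '2015-01-01'
    return data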
| {
"content_hash": "f4c682b6dbe9775042416e48c7d4596c",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 72,
"avg_line_length": 34.81818181818182,
"alnum_prop": 0.5374860126818352,
"repo_name": "reinbach/finance",
"id": "1157b555f4c958ec1027c657d06bcd0c8285445b",
"size": "2681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/finance/models/transaction.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "537"
},
{
"name": "HTML",
"bytes": "12718"
},
{
"name": "JavaScript",
"bytes": "42149"
},
{
"name": "Python",
"bytes": "94442"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "512"
}
],
"symlink_target": ""
} |
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=1, faild_point=100000, path_list=[
[TestAction.create_mini_vm, 'vm1', 'cluster=cluster1'],
[TestAction.create_volume, 'volume1', 'cluster=cluster1', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume_backup, 'volume1', 'volume1-backup1'],
[TestAction.start_vm, 'vm1'],
[TestAction.stop_vm, 'vm1'],
[TestAction.create_mini_vm, 'vm2', 'cluster=cluster2'],
[TestAction.create_image_from_volume, 'vm2', 'vm2-image1'],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.create_volume, 'volume2', 'cluster=cluster1', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.create_volume, 'volume3', 'cluster=cluster2', 'flag=scsi'],
[TestAction.delete_volume, 'volume3'],
[TestAction.add_image, 'image2', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.delete_volume_backup, 'volume1-backup1'],
[TestAction.delete_image, 'image2'],
[TestAction.recover_image, 'image2'],
[TestAction.delete_image, 'image2'],
[TestAction.expunge_image, 'image2'],
[TestAction.create_volume, 'volume4', 'cluster=cluster2', 'flag=scsi'],
[TestAction.attach_volume, 'vm2', 'volume4'],
[TestAction.start_vm, 'vm2'],
[TestAction.create_volume_backup, 'volume4', 'volume4-backup2'],
[TestAction.stop_vm, 'vm2'],
[TestAction.add_image, 'image3', 'root', os.environ.get('isoForVmUrl')],
[TestAction.create_vm_by_image, 'image3', 'iso', 'vm3', 'cluster=cluster1'],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.start_vm, 'vm1'],
[TestAction.migrate_vm, 'vm1'],
[TestAction.detach_volume, 'volume1'],
[TestAction.delete_volume, 'volume4'],
[TestAction.create_vm_backup, 'vm3', 'vm3-backup3'],
[TestAction.delete_vm_backup, 'vm3-backup3'],
[TestAction.create_mini_vm, 'vm4', 'network=random', 'cluster=cluster1'],
[TestAction.delete_volume, 'volume2'],
[TestAction.expunge_volume, 'volume2'],
[TestAction.start_vm, 'vm2'],
[TestAction.reboot_vm, 'vm2'],
[TestAction.create_vm_backup, 'vm2', 'vm2-backup4'],
[TestAction.resize_volume, 'vm1', 5*1024*1024],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.delete_vm_backup, 'vm2-backup4'],
])
'''
The final status:
Running:['vm2']
Stopped:['vm3', 'vm1', 'vm4']
Enabled:['volume4-backup2', 'vm2-image1', 'image3']
attached:[]
Detached:['volume1']
Deleted:['volume3', 'volume4', 'volume1-backup1', 'vm3-backup3', 'vm2-backup4']
Expunged:['volume2', 'image2']
Ha:[]
Group:
''' | {
"content_hash": "98a0e23d1f7ebce14a18f219a127ed12",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 104,
"avg_line_length": 40.92307692307692,
"alnum_prop": 0.6921052631578948,
"repo_name": "zstackio/zstack-woodpecker",
"id": "b01ecdb8ae9721161cc965e78f2c1ae54a430f36",
"size": "2660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integrationtest/vm/mini/multiclusters/paths/multi_path75.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Copyright (c) 2017 CEVAP
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
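# Editor's sketch (not part of the original script): what the helpers above
# produce for two representative, made-up inputs.
def _example_parse_spec():
    # IPv4 is embedded in the IPv6-mapped ::ffff:0:0/96 range.
    host, port = parse_spec('1.2.3.4:8333', 12700)
    assert host == pchIPv4 + bytearray([1, 2, 3, 4]) and port == 8333
    # A bare IPv6 address picks up the default port.
    host, port = parse_spec('[2001:db8::1]', 12700)
    assert port == 12700 and len(host) == 16
    return host, port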
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef ION_CHAINPARAMSSEEDS_H\n')
g.write('#define ION_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the ion network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 12700)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 27170)
g.write('#endif // ION_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| {
"content_hash": "624618a752554cef55e7ae44a3d95ada",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 98,
"avg_line_length": 31.31654676258993,
"alnum_prop": 0.5704112106593154,
"repo_name": "aspaas/ion",
"id": "1c3b301f1807dcfd044bd7569726ac637e4756fe",
"size": "4353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/seeds/generate-seeds.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "616463"
},
{
"name": "C++",
"bytes": "4560754"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "18274"
},
{
"name": "Makefile",
"bytes": "16792"
},
{
"name": "NSIS",
"bytes": "5917"
},
{
"name": "Objective-C++",
"bytes": "6205"
},
{
"name": "Python",
"bytes": "96149"
},
{
"name": "QMake",
"bytes": "20721"
},
{
"name": "Shell",
"bytes": "391146"
}
],
"symlink_target": ""
} |
import asyncio
import unittest
from hbmqtt.mqtt.unsubscribe import UnsubscribePacket, UnubscribePayload
from hbmqtt.mqtt.packet import PacketIdVariableHeader
from hbmqtt.adapters import BufferReader
class UnsubscribePacketTest(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
def test_from_stream(self):
data = b'\xa2\x0c\x00\n\x00\x03a/b\x00\x03c/d'
stream = BufferReader(data)
message = self.loop.run_until_complete(UnsubscribePacket.from_stream(stream))
self.assertEqual(message.payload.topics[0], 'a/b')
self.assertEqual(message.payload.topics[1], 'c/d')
def test_to_stream(self):
variable_header = PacketIdVariableHeader(10)
payload = UnubscribePayload(['a/b', 'c/d'])
publish = UnsubscribePacket(variable_header=variable_header, payload=payload)
out = publish.to_bytes()
self.assertEqual(out, b'\xa2\x0c\x00\n\x00\x03a/b\x00\x03c/d')
| {
"content_hash": "b0efa7603101ccfae6c5cafbff265105",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 85,
"avg_line_length": 38.8,
"alnum_prop": 0.7051546391752578,
"repo_name": "beerfactory/hbmqtt",
"id": "fc97c022c3ae08dd54650e858b52dd78816668f4",
"size": "1060",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/mqtt/test_unsubscribe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "305024"
}
],
"symlink_target": ""
} |
import pytest
import copy
from blitzdb import Document
import six
@pytest.fixture(scope="function")
def mockup_backend():
class Backend(object):
def __init__(self):
self.attributes = {'foo': 'bar', 'baz': 123}
def get(self, DocumentClass, pk):
return DocumentClass(copy.deepcopy(self.attributes))
return Backend()
def test_unicode():
doc = Document({'pk' : 'foo'})
if six.PY2:
assert unicode(str(doc)) == unicode(doc)
else:
assert doc.__unicode__ == doc.__str__
def test_basic_attributes():
attributes = {'foo': 'bar', 'baz': 1243, 'd': {1: 3, 4: 5}, 'l': [1, 2, 3, 4]}
doc = Document(attributes)
assert doc.foo == 'bar'
assert doc.baz == 1243
assert doc.d == {1: 3, 4: 5}
assert doc.l == [1, 2, 3, 4]
assert doc.foo == doc['foo']
assert doc.baz == doc['baz']
assert doc.d == doc['d']
assert doc.attributes == attributes
def test_iteration():
attributes = {'foo': 'bar', 'baz': 1243, 'd': {1: 3, 4: 5}, 'l': [1, 2, 3, 4]}
doc = Document(attributes)
for key in doc:
assert key in attributes
for key,value in doc.items():
assert key in attributes
assert attributes[key] == value
for value in doc.values():
assert value in attributes.values()
def test_attribute_deletion():
attributes = {'foo': 'bar', 'baz': 1243, 'd': {1: 3, 4: 5}, 'l': [1, 2, 3, 4]}
doc = Document(attributes)
del doc.foo
with pytest.raises(AttributeError):
doc.foo
with pytest.raises(KeyError):
doc['foo']
with pytest.raises(KeyError):
del doc['foo']
with pytest.raises(AttributeError):
del doc.foo
def test_lazy_attributes(mockup_backend):
def get_lazy_doc():
return Document({'pk': 1}, lazy=True, default_backend=mockup_backend)
    # Fetching of attribute by class attribute
doc = get_lazy_doc()
assert doc._lazy == True
assert doc.foo == 'bar'
assert doc._lazy == False
# Fetching of attribute by dict
doc = get_lazy_doc()
assert doc._lazy == True
assert doc['foo'] == 'bar'
assert doc._lazy == False
# Getting all attributes
doc = get_lazy_doc()
assert doc._lazy == True
attributes = doc.attributes
del attributes['pk']
assert attributes == mockup_backend.attributes
assert doc._lazy == False
# Deletion by dict
doc = get_lazy_doc()
assert doc._lazy == True
del doc['foo']
with pytest.raises(KeyError):
doc['foo']
assert doc._lazy == False
# Deletion by attribute
doc = get_lazy_doc()
assert doc._lazy == True
del doc.foo
with pytest.raises(AttributeError):
doc.foo
assert doc._lazy == False
# Update by dict
doc = get_lazy_doc()
assert doc._lazy == True
doc['foo'] = 'faz'
assert doc._lazy == False
assert doc['foo'] == 'faz'
# Update by attribute
doc = get_lazy_doc()
assert doc._lazy == True
doc.foo = 'faz'
assert doc._lazy == False
assert doc.foo == 'faz'
def test_container_operations():
attributes = {'foo': 'bar', 'baz': 1243, 'd': {1: 3, 4: 5}, 'l': [1, 2, 3, 4]}
doc = Document(attributes)
with pytest.raises(KeyError):
doc['fooz']
assert ('foo' in doc) == True
assert ('fooz' in doc) == False
assert list(doc.keys()) == list(attributes.keys())
assert list(doc.values()) == list(attributes.values())
assert doc.items() == attributes.items()
def test_different_primary_key_names():
class MyDocument(Document):
class Meta:
primary_key = 'foobar'
doc = MyDocument({'foo': 'bar', 'foobar': 1})
assert doc.pk == 1
doc.pk = 2
assert doc.attributes['foobar'] == 2
def test_delete():
class MyDocument(Document):
class Meta:
primary_key = 'foobar'
doc = MyDocument({'foo': 'bar', 'foobar': 1})
assert doc.pk == 1
assert doc.foo == 'bar'
del doc.foo
with pytest.raises(AttributeError):
doc.foo
with pytest.raises(KeyError):
doc['foo']
| {
"content_hash": "29dcfa310f9f63d5aa040987f878e70b",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 82,
"avg_line_length": 20.6318407960199,
"alnum_prop": 0.5760790933204726,
"repo_name": "kylewm/blitzdb",
"id": "5ef1a4e1bed4868fa43e957be80bad8a4a660ee6",
"size": "4147",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "blitzdb/tests/test_documents.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "174772"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import unittest
import doctest
import os
import r5d4
from r5d4 import app
from r5d4.analytics_worker import start_analytics_worker
from r5d4.analytics_manager import AnalyticsManager
from r5d4.test_settings import REDIS_UNIX_SOCKET_PATH, REDIS_HOST, \
REDIS_PORT, CONFIG_DB
def load_tests(loader, tests, ignore):
# Loading doctests from modules
tests.addTests(doctest.DocTestSuite(r5d4.flask_redis))
tests.addTests(doctest.DocTestSuite(r5d4.utility))
tests.addTests(doctest.DocTestSuite(r5d4.mapping_functions))
tests.addTests(doctest.DocTestSuite(r5d4.analytics_browser))
return tests
def make_absolute_path(relative_path):
ROOT_DIR = os.path.dirname(__file__)
return os.path.abspath(os.path.join(ROOT_DIR, relative_path))
class r5d4TestCase(unittest.TestCase):
def setUp(self):
app.config["TESTING"] = True
app.config["REDIS_UNIX_SOCKET_PATH"] = REDIS_UNIX_SOCKET_PATH
app.config["REDIS_HOST"] = REDIS_HOST
app.config["REDIS_PORT"] = REDIS_PORT
app.config["CONFIG_DB"] = CONFIG_DB
self.flask_app = app
self.app = app.test_client()
self.analytics_worker = start_analytics_worker(app=app)
self.analytics_manager = AnalyticsManager(app=app)
def test_r5d4(self):
# TODO
pass
def tearDown(self):
if self.analytics_worker:
if self.analytics_worker.is_alive():
self.analytics_worker.terminate()
self.analytics_worker.join()
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "9d28aa625ac20a1b58d2c52a98fe603e",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 69,
"avg_line_length": 31.352941176470587,
"alnum_prop": 0.683552220137586,
"repo_name": "practo/r5d4",
"id": "325bb4ab3a4edf9aef8ec7847365bca55ce40930",
"size": "1621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/run_tests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52496"
}
],
"symlink_target": ""
} |
import subprocess
import os
import json
import sys
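# Run a single test case described by testObj: execute the program with the
# given arguments, optionally piping an input file to stdin, then compare
# stdout with the expected output file and check the return code.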
def bctest(testDir, testObj, exeext):
execprog = testObj['exec'] + exeext
execargs = testObj['args']
execrun = [execprog] + execargs
stdinCfg = None
inputData = None
if "input" in testObj:
filename = testDir + "/" + testObj['input']
inputData = open(filename).read()
stdinCfg = subprocess.PIPE
outputFn = None
outputData = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputData = open(testDir + "/" + outputFn).read()
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE,universal_newlines=True)
try:
outs = proc.communicate(input=inputData)
except OSError:
print("OSError, Failed to execute " + execprog)
sys.exit(1)
if outputData and (outs[0] != outputData):
print("Output data mismatch for " + outputFn)
sys.exit(1)
wantRC = 0
if "return_code" in testObj:
wantRC = testObj['return_code']
if proc.returncode != wantRC:
print("Return code mismatch for " + outputFn)
sys.exit(1)
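# Load the JSON list of test cases and run each of them with bctest().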
def bctester(testDir, input_basename, buildenv):
input_filename = testDir + "/" + input_basename
raw_data = open(input_filename).read()
input_data = json.loads(raw_data)
for testObj in input_data:
bctest(testDir, testObj, buildenv.exeext)
sys.exit(0)
| {
"content_hash": "953e4fdcb7eabc16406545a5d727a7ad",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 121,
"avg_line_length": 26.08,
"alnum_prop": 0.7085889570552147,
"repo_name": "duality-solutions/Sequence",
"id": "aed79d6ab34b213a5ec7fed3ca4cd1d9d97d75b3",
"size": "1646",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/test/bctest.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7639"
},
{
"name": "C",
"bytes": "383468"
},
{
"name": "C++",
"bytes": "4427710"
},
{
"name": "CSS",
"bytes": "38313"
},
{
"name": "HTML",
"bytes": "20970"
},
{
"name": "Java",
"bytes": "2101"
},
{
"name": "M4",
"bytes": "166652"
},
{
"name": "Makefile",
"bytes": "104593"
},
{
"name": "Objective-C",
"bytes": "2162"
},
{
"name": "Objective-C++",
"bytes": "7444"
},
{
"name": "Python",
"bytes": "229043"
},
{
"name": "QMake",
"bytes": "25524"
},
{
"name": "Roff",
"bytes": "18108"
},
{
"name": "Shell",
"bytes": "46799"
}
],
"symlink_target": ""
} |
import random
import numpy as np
import torch
from torchvision import transforms as T
from torchvision.transforms import functional as F
def pad_if_smaller(img, size, fill=0):
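    # Pad the right and bottom edges so both dimensions are at least `size`
    # (RandomCrop passes fill=255 for the segmentation target).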
min_size = min(img.size)
if min_size < size:
ow, oh = img.size
padh = size - oh if oh < size else 0
padw = size - ow if ow < size else 0
img = F.pad(img, (0, 0, padw, padh), fill=fill)
return img
class Compose:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
class RandomResize:
def __init__(self, min_size, max_size=None):
self.min_size = min_size
if max_size is None:
max_size = min_size
self.max_size = max_size
def __call__(self, image, target):
size = random.randint(self.min_size, self.max_size)
image = F.resize(image, size)
target = F.resize(target, size, interpolation=T.InterpolationMode.NEAREST)
return image, target
class RandomHorizontalFlip:
def __init__(self, flip_prob):
self.flip_prob = flip_prob
def __call__(self, image, target):
if random.random() < self.flip_prob:
image = F.hflip(image)
target = F.hflip(target)
return image, target
class RandomCrop:
def __init__(self, size):
self.size = size
def __call__(self, image, target):
image = pad_if_smaller(image, self.size)
target = pad_if_smaller(target, self.size, fill=255)
crop_params = T.RandomCrop.get_params(image, (self.size, self.size))
image = F.crop(image, *crop_params)
target = F.crop(target, *crop_params)
return image, target
class CenterCrop:
def __init__(self, size):
self.size = size
def __call__(self, image, target):
image = F.center_crop(image, self.size)
target = F.center_crop(target, self.size)
return image, target
class PILToTensor:
def __call__(self, image, target):
image = F.pil_to_tensor(image)
target = torch.as_tensor(np.array(target), dtype=torch.int64)
return image, target
class ConvertImageDtype:
def __init__(self, dtype):
self.dtype = dtype
def __call__(self, image, target):
image = F.convert_image_dtype(image, self.dtype)
return image, target
class Normalize:
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, target):
image = F.normalize(image, mean=self.mean, std=self.std)
return image, target
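# A minimal sketch of how these joint transforms might be chained; the values
# below are illustrative, not the presets used by the training scripts:
#
#   transform = Compose([
#       RandomResize(min_size=520, max_size=1040),
#       RandomHorizontalFlip(0.5),
#       RandomCrop(480),
#       PILToTensor(),
#       ConvertImageDtype(torch.float),
#       Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
#   ])
#   image, target = transform(pil_image, pil_target)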
| {
"content_hash": "1ace1afb37416255654c72ed87af1369",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 82,
"avg_line_length": 26.99,
"alnum_prop": 0.6005928121526491,
"repo_name": "pytorch/vision",
"id": "518048db2faef36297c4a47c700d7235434fcc0b",
"size": "2699",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "references/segmentation/transforms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "20242"
},
{
"name": "C",
"bytes": "930"
},
{
"name": "C++",
"bytes": "366825"
},
{
"name": "CMake",
"bytes": "18266"
},
{
"name": "Cuda",
"bytes": "90174"
},
{
"name": "Dockerfile",
"bytes": "1608"
},
{
"name": "Java",
"bytes": "21833"
},
{
"name": "Objective-C",
"bytes": "2715"
},
{
"name": "Objective-C++",
"bytes": "3284"
},
{
"name": "PowerShell",
"bytes": "2874"
},
{
"name": "Python",
"bytes": "3952070"
},
{
"name": "Ruby",
"bytes": "1086"
},
{
"name": "Shell",
"bytes": "35660"
}
],
"symlink_target": ""
} |
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXBIGRID(gxapi_cy.WrapBIGRID):
"""
GXBIGRID class.
The Bigrid class is used to grid data using a optimized algorithm that
assumes data is collected in semi-straight lines.
"""
def __init__(self, handle=0):
super(GXBIGRID, self).__init__(GXContext._get_tls_geo(), handle)
@classmethod
def null(cls):
"""
A null (undefined) instance of `GXBIGRID <geosoft.gxapi.GXBIGRID>`
:returns: A null `GXBIGRID <geosoft.gxapi.GXBIGRID>`
:rtype: GXBIGRID
"""
return GXBIGRID()
def is_null(self):
"""
Check if this is a null (undefined) instance
:returns: True if this is a null (undefined) instance, False otherwise.
:rtype: bool
"""
return self._internal_handle() == 0
# Miscellaneous
def clear(self):
"""
Clears all the parameters in a `GXBIGRID <geosoft.gxapi.GXBIGRID>` object
.. versionadded:: 5.0
**License:** `Geosoft Extended End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-ext-end-user-lic>`_
"""
self._clear()
@classmethod
def create(cls):
"""
Create a handle to a Bigrid object
:returns: `GXBIGRID <geosoft.gxapi.GXBIGRID>` Object
:rtype: GXBIGRID
.. versionadded:: 5.0
**License:** `Geosoft Extended End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-ext-end-user-lic>`_
**Note:** The Bigrid object is initially empty. It will store the
control file parameters which the Bigrid program needs
to execute. Use the LoadParms_BIGRID method to get the
control file parameters into the `GXBIGRID <geosoft.gxapi.GXBIGRID>` object.
"""
ret_val = gxapi_cy.WrapBIGRID._create(GXContext._get_tls_geo())
return GXBIGRID(ret_val)
def load_parms(self, file):
"""
Retrieves a Bigrid object's control parameters from a file,
or sets the parameters to default if the file doesn't exist.
:param file: Name of file to get the parameter settings from
:type file: str
:returns: 0 - Ok
1 - Error
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Extended End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-ext-end-user-lic>`_
**Note:** If the control file name passed into this function is a file
which does not exist, then the defaults for a Bigrid control
file will be generated and put into the `GXBIGRID <geosoft.gxapi.GXBIGRID>` object.
Otherwise, the control file's settings are retrieved from
the file and loaded into the `GXBIGRID <geosoft.gxapi.GXBIGRID>` object.
"""
ret_val = self._load_parms(file.encode())
return ret_val
def load_warp(self, title, cell, warp):
"""
Load a warp projection.
:param title: New grid title
:param cell: New grid cell size as a string, blank for default
:param warp: Warp projection file name
:type title: str
:type cell: str
:type warp: str
:returns: 0 - Ok
1 - Error
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Extended End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-ext-end-user-lic>`_
"""
ret_val = self._load_warp(title.encode(), cell.encode(), warp.encode())
return ret_val
def run(self, zchan, in_dat, out_dat):
"""
Executes the Bigrid program, using the input channel and
output file parameters.
:param zchan: Not used, pass as ""
:param in_dat: Handle to source `GXDAT <geosoft.gxapi.GXDAT>` object (from database)
:param out_dat: Handle to output grid file `GXDAT <geosoft.gxapi.GXDAT>`
:type zchan: str
:type in_dat: GXDAT
:type out_dat: GXDAT
.. versionadded:: 5.0
**License:** `Geosoft Extended End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-ext-end-user-lic>`_
"""
self._run(zchan.encode(), in_dat, out_dat)
def run2(self, zchan, in_dat, out_dat, ipj):
"""
Executes the Bigrid program, using the input channel and
output file parameters with a projection handle.
:param zchan: Not used, pass as ""
:param in_dat: Handle to source `GXDAT <geosoft.gxapi.GXDAT>` object (from database)
:param out_dat: Handle to output grid file `GXDAT <geosoft.gxapi.GXDAT>`
:param ipj: `GXIPJ <geosoft.gxapi.GXIPJ>` handle of the projection system
:type zchan: str
:type in_dat: GXDAT
:type out_dat: GXDAT
:type ipj: GXIPJ
.. versionadded:: 6.3
**License:** `Geosoft Extended End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-ext-end-user-lic>`_
"""
self._run2(zchan.encode(), in_dat, out_dat, ipj)
def save_parms(self, name):
"""
Puts the Bigrid object's control parameters back into
its control file.
:param name: Name of file to put the parameter settings into
:type name: str
.. versionadded:: 5.0
**License:** `Geosoft Extended End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-ext-end-user-lic>`_
**Note:** If the control file did not previously exist, it will be
created. Otherwise, the old file will be overwritten.
"""
self._save_parms(name.encode())
@classmethod
def get_defaults(cls, db, x, y, z, cell, maxLineSeparation, maxPointSeparation, trendAngle, lowPassWavelength, highPass, noneLinear, preFilter):
"""
Get default values for max line separation, max point separation and trend angle.
:param db: Handle to a database
        :param x: X Channel
        :param y: Y Channel
:param z: Data channel
:param cell: cell size
:param maxLineSeparation: max line separation
:param maxPointSeparation: max point separation
:param trendAngle: trend angle
:param lowPassWavelength: low-pass filter wavelength
:param highPass: high-pass filter wavelength
:param noneLinear: non-linear filter tolerance
:param preFilter: pre-filter sample increment
:type db: GXDB
:type x: str
:type y: str
:type z: str
:type cell: float
:type maxLineSeparation: float_ref
:type maxPointSeparation: float_ref
:type trendAngle: float_ref
:type lowPassWavelength: float_ref
:type highPass: float_ref
:type noneLinear: float_ref
:type preFilter: float_ref
:returns: 0 - Ok
1 - Error
:rtype: int
.. versionadded:: 2021.2
**License:** `Geosoft Extended End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-ext-end-user-lic>`_
"""
ret_val, maxLineSeparation.value, maxPointSeparation.value, trendAngle.value, lowPassWavelength.value, highPass.value, noneLinear.value, preFilter.value = gxapi_cy.WrapBIGRID._get_defaults(GXContext._get_tls_geo(), db, x.encode(), y.encode(), z.encode(), cell, maxLineSeparation.value, maxPointSeparation.value, trendAngle.value, lowPassWavelength.value, highPass.value, noneLinear.value, preFilter.value)
return ret_val
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
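# A minimal usage sketch based on the docstrings above; names such as
# in_dat/out_dat are illustrative and assume existing GXDAT handles:
#
#   bigrid = GXBIGRID.create()
#   bigrid.load_parms('bigrid.con')   # defaults are generated if the file is missing
#   bigrid.run('', in_dat, out_dat)
#   bigrid.save_parms('bigrid.con')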
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer | {
"content_hash": "20c5455c9a818f05157d19e97464fbe1",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 413,
"avg_line_length": 33.99233716475096,
"alnum_prop": 0.5962578899909828,
"repo_name": "GeosoftInc/gxpy",
"id": "3a1c65833b4a75c1f66a1eccfe14381ab4800d92",
"size": "8982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geosoft/gxapi/GXBIGRID.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "4799134"
}
],
"symlink_target": ""
} |
from ._storage_management_client import StorageManagementClient
__all__ = ['StorageManagementClient']
try:
from ._patch import patch_sdk # type: ignore
patch_sdk()
except ImportError:
pass
from ._version import VERSION
__version__ = VERSION
| {
"content_hash": "28a26bd32140bc64f2b83ccd8636128f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 63,
"avg_line_length": 21.416666666666668,
"alnum_prop": 0.7198443579766537,
"repo_name": "Azure/azure-sdk-for-python",
"id": "f6821446dc1aae8b9079cc67e0479e132abc506b",
"size": "725",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/storage/azure-mgmt-storage/azure/mgmt/storage/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Layout for columns."""
from math import floor, inf
from .absolute import absolute_layout
from .percent import resolve_percentages
def columns_layout(context, box, bottom_space, skip_stack, containing_block,
page_is_empty, absolute_boxes, fixed_boxes,
adjoining_margins):
"""Lay out a multi-column ``box``."""
from .block import (
block_box_layout, block_level_layout, block_level_width,
collapse_margin, remove_placeholders)
style = box.style
width = style['column_width']
count = style['column_count']
gap = style['column_gap']
height = style['height']
original_bottom_space = bottom_space
context.in_column = True
if style['position'] == 'relative':
# New containing block, use a new absolute list
absolute_boxes = []
box = box.copy_with_children(box.children)
box.position_y += collapse_margin(adjoining_margins) - box.margin_top
# Set height if defined
if height != 'auto' and height.unit != '%':
assert height.unit == 'px'
height_defined = True
empty_space = context.page_bottom - box.content_box_y() - height.value
bottom_space = max(bottom_space, empty_space)
else:
height_defined = False
# TODO: the columns container width can be unknown if the containing block
# needs the size of this block to know its own size
block_level_width(box, containing_block)
# Define the number of columns and their widths
if width == 'auto' and count != 'auto':
width = max(0, box.width - (count - 1) * gap) / count
elif width != 'auto' and count == 'auto':
count = max(1, int(floor((box.width + gap) / (width + gap))))
width = (box.width + gap) / count - gap
else: # overconstrained, with width != 'auto' and count != 'auto'
count = min(count, int(floor((box.width + gap) / (width + gap))))
width = (box.width + gap) / count - gap
# Handle column-span property with the following structure:
# columns_and_blocks = [
# [column_child_1, column_child_2],
# spanning_block,
# …
# ]
columns_and_blocks = []
column_children = []
skip, = skip_stack.keys() if skip_stack else (0,)
for i, child in enumerate(box.children[skip:], start=skip):
if child.style['column_span'] == 'all':
if column_children:
columns_and_blocks.append(
(i - len(column_children), column_children))
columns_and_blocks.append((i, child.copy()))
column_children = []
continue
column_children.append(child.copy())
if column_children:
columns_and_blocks.append(
(i + 1 - len(column_children), column_children))
if skip_stack:
skip_stack = {0: skip_stack[skip]}
if not box.children:
next_page = {'break': 'any', 'page': None}
skip_stack = None
# Find height and balance.
#
# The current algorithm starts from the total available height, to check
# whether the whole content can fit. If it doesn’t fit, we keep the partial
# rendering. If it fits, we try to balance the columns starting from the
# ideal height (the total height divided by the number of columns). We then
# iterate until the last column is not the highest one. At the end of each
# loop, we add the minimal height needed to make one direct child at the
# top of one column go to the end of the previous column.
#
# We rely on a real rendering for each loop, and with a stupid algorithm
# like this it can last minutes…
adjoining_margins = []
current_position_y = box.content_box_y()
new_children = []
column_skip_stack = None
last_loop = False
break_page = False
footnote_area_heights = [
0 if context.current_footnote_area.height == 'auto'
else context.current_footnote_area.margin_height()]
last_footnotes_height = 0
for index, column_children_or_block in columns_and_blocks:
if not isinstance(column_children_or_block, list):
# We have a spanning block, we display it like other blocks
block = column_children_or_block
resolve_percentages(block, containing_block)
block.position_x = box.content_box_x()
block.position_y = current_position_y
new_child, resume_at, next_page, adjoining_margins, _, _ = (
block_level_layout(
context, block, original_bottom_space, skip_stack,
containing_block, page_is_empty, absolute_boxes,
fixed_boxes, adjoining_margins))
skip_stack = None
if new_child is None:
last_loop = True
break_page = True
break
new_children.append(new_child)
current_position_y = (
new_child.border_height() + new_child.border_box_y())
adjoining_margins.append(new_child.margin_bottom)
if resume_at:
last_loop = True
break_page = True
column_skip_stack = resume_at
break
page_is_empty = False
continue
# We have a list of children that we have to balance between columns
column_children = column_children_or_block
# Find the total height available for the first run
current_position_y += collapse_margin(adjoining_margins)
adjoining_margins = []
column_box = _create_column_box(
box, containing_block, column_children, width, current_position_y)
height = max_height = (
context.page_bottom - current_position_y - original_bottom_space)
# Try to render columns until the content fits, increase the column
# height step by step
column_skip_stack = skip_stack
lost_space = inf
original_excluded_shapes = context.excluded_shapes[:]
original_page_is_empty = page_is_empty
page_is_empty = stop_rendering = balancing = False
while True:
# Remove extra excluded shapes introduced during the previous loop
while len(context.excluded_shapes) > len(original_excluded_shapes):
context.excluded_shapes.pop()
# Render the columns
column_skip_stack = skip_stack
consumed_heights = []
new_boxes = []
for i in range(count):
# Render one column
new_box, resume_at, next_page, _, _, _ = block_box_layout(
context, column_box,
context.page_bottom - current_position_y - height,
column_skip_stack, containing_block,
page_is_empty or not balancing, [], [], [],
discard=False, max_lines=None)
if new_box is None:
# We didn't render anything, retry
column_skip_stack = {0: None}
break
new_boxes.append(new_box)
column_skip_stack = resume_at
# Calculate consumed height, empty space and next box height
in_flow_children = [
child for child in new_box.children
if child.is_in_normal_flow()]
if in_flow_children:
# Get the empty space at the bottom of the column box
consumed_height = (
in_flow_children[-1].margin_height() +
in_flow_children[-1].position_y - current_position_y)
empty_space = height - consumed_height
# Get the minimum size needed to render the next box
if column_skip_stack:
next_box = block_box_layout(
context, column_box, inf, column_skip_stack,
containing_block, True, [], [], [],
discard=False, max_lines=None)[0]
for child in next_box.children:
if child.is_in_normal_flow():
next_box_height = child.margin_height()
break
remove_placeholders(context, [next_box], [], [])
else:
next_box_height = 0
else:
consumed_height = empty_space = next_box_height = 0
consumed_heights.append(consumed_height)
# Append the size needed to render the next box in this
# column.
#
# The next box size may be smaller than the empty space, for
# example when the next box can't be separated from its own
# next box. In this case we don't try to find the real value
# and let the workaround below fix this for us.
#
# We also want to avoid very small values that may have been
# introduced by rounding errors. As the workaround below at
# least adds 1 pixel for each loop, we can ignore lost spaces
# lower than 1px.
if next_box_height - empty_space > 1:
lost_space = min(lost_space, next_box_height - empty_space)
# Stop if we already rendered the whole content
if resume_at is None:
break
# Remove placeholders but keep the current footnote area height
last_footnotes_height = (
0 if context.current_footnote_area.height == 'auto'
else context.current_footnote_area.margin_height())
remove_placeholders(context, new_boxes, [], [])
if last_loop:
break
if balancing:
if column_skip_stack is None:
# We rendered the whole content, stop
break
# Increase the column heights and render them again
add_height = 1 if lost_space == inf else lost_space
height += add_height
if height > max_height:
# We reached max height, stop rendering
height = max_height
stop_rendering = True
break
else:
if last_footnotes_height not in footnote_area_heights:
# Footnotes have been rendered, try to re-render with the
# new footnote area height
height -= last_footnotes_height - footnote_area_heights[-1]
footnote_area_heights.append(last_footnotes_height)
continue
everything_fits = (
not column_skip_stack and
max(consumed_heights) <= max_height)
if everything_fits:
# Everything fits, start expanding columns at the average
# of the column heights
max_height -= last_footnotes_height
if style['column_fill'] == 'balance':
balancing = True
height = sum(consumed_heights) / count
else:
break
else:
# Content overflows even at maximum height, stop now and
# let the columns continue on the next page
height += footnote_area_heights[-1]
if len(footnote_area_heights) > 2:
last_footnotes_height = min(
last_footnotes_height, footnote_area_heights[-1])
height -= last_footnotes_height
stop_rendering = True
break
        # TODO: check style['max_height']
bottom_space = max(
bottom_space, context.page_bottom - current_position_y - height)
# Replace the current box children with real columns
i = 0
max_column_height = 0
columns = []
while True:
column_box = _create_column_box(
box, containing_block, column_children, width,
current_position_y)
if style['direction'] == 'rtl':
column_box.position_x += box.width - (i + 1) * width - i * gap
else:
column_box.position_x += i * (width + gap)
new_child, column_skip_stack, column_next_page, _, _, _ = (
block_box_layout(
context, column_box, bottom_space, skip_stack,
containing_block, original_page_is_empty, absolute_boxes,
fixed_boxes, None, discard=False, max_lines=None))
if new_child is None:
break_page = True
break
next_page = column_next_page
skip_stack = column_skip_stack
columns.append(new_child)
max_column_height = max(
max_column_height, new_child.margin_height())
if skip_stack is None:
bottom_space = original_bottom_space
break
i += 1
if i == count and not height_defined:
# [If] a declaration that constrains the column height
# (e.g., using height or max-height). In this case,
# additional column boxes are created in the inline
# direction.
break
# Update the current y position and set the columns’ height
current_position_y += min(max_height, max_column_height)
for column in columns:
column.height = max_column_height
new_children.append(column)
skip_stack = None
page_is_empty = False
if stop_rendering:
break
# Report footnotes above the defined footnotes height
_report_footnotes(context, last_footnotes_height)
if box.children and not new_children:
# The box has children but none can be drawn, let's skip the whole box
context.in_column = False
return None, (0, None), {'break': 'any', 'page': None}, [], False
# Set the height of the containing box
box.children = new_children
current_position_y += collapse_margin(adjoining_margins)
height = current_position_y - box.content_box_y()
if box.height == 'auto':
box.height = height
height_difference = 0
else:
height_difference = box.height - height
# Update the latest columns’ height to respect min-height
if box.min_height != 'auto' and box.min_height > box.height:
height_difference += box.min_height - box.height
box.height = box.min_height
for child in new_children[::-1]:
if child.is_column:
child.height += height_difference
else:
break
if style['position'] == 'relative':
# New containing block, resolve the layout of the absolute descendants
for absolute_box in absolute_boxes:
absolute_layout(
context, absolute_box, box, fixed_boxes, bottom_space,
skip_stack=None)
# Calculate skip stack
if column_skip_stack:
skip, = column_skip_stack.keys()
skip_stack = {index + skip: column_skip_stack[skip]}
elif break_page:
skip_stack = {index: None}
# Update page bottom according to the new footnotes
if context.current_footnote_area.height != 'auto':
context.page_bottom += footnote_area_heights[0]
context.page_bottom -= context.current_footnote_area.margin_height()
context.in_column = False
return box, skip_stack, next_page, [], False
def _report_footnotes(context, footnotes_height):
"""Report footnotes above the defined footnotes height."""
if not context.current_page_footnotes:
return
# Report and count footnotes
reported_footnotes = 0
while context.current_footnote_area.margin_height() > footnotes_height:
context.report_footnote(context.current_page_footnotes[-1])
reported_footnotes += 1
# Revert reported footnotes, as they’ve been reported starting from the
# last one
if reported_footnotes >= 2:
extra = context.reported_footnotes[-1:-reported_footnotes-1:-1]
context.reported_footnotes[-reported_footnotes:] = extra
def _create_column_box(box, containing_block, children, width, position_y):
"""Create a column box including given children."""
column_box = box.anonymous_from(box, children=children)
resolve_percentages(column_box, containing_block)
column_box.is_column = True
column_box.width = width
column_box.position_x = box.content_box_x()
column_box.position_y = position_y
return column_box
| {
"content_hash": "83ff2cd94ba587a61321e04f14226267",
"timestamp": "",
"source": "github",
"line_count": 409,
"max_line_length": 79,
"avg_line_length": 41.603911980440095,
"alnum_prop": 0.5619417019275975,
"repo_name": "Kozea/WeasyPrint",
"id": "deb7e17f5bc0076ace3a393d12cd1a8cd57591cb",
"size": "17028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weasyprint/layout/column.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "48998"
},
{
"name": "HTML",
"bytes": "18932"
},
{
"name": "Python",
"bytes": "1915976"
}
],
"symlink_target": ""
} |