# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from functools import partial
import itertools
import operator
import re
from unittest import SkipTest
import textwrap
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import numpy.random as npr
import jax
from jax._src import api
from jax import core
from jax import lax
from jax import random
from jax import test_util as jtu
from jax import tree_util
from jax._src.util import unzip2
from jax.lib import xla_bridge
from jax.interpreters import xla
import jax.numpy as jnp  # scan tests use numpy and jax.numpy
import jax.scipy as jsp
from jax.config import config
config.parse_flags_with_absl()
# Some tests are useful for testing both lax.cond and lax.switch. This function
# provides a lax.cond-compatible interface to a two-branch lax.switch. Several
# tests in this file are parameterized such that they either call into lax.cond
# or into this function.
def cond_via_switch(pred, true_fun, false_fun, op, *args):
if len(args) > 0:
assert len(args) == 1
true_op, _true_fun, false_op, _false_fun = true_fun, false_fun, op, args[0]
op = (false_op, true_op)
false_fun = lambda op: _false_fun(op[0])
true_fun = lambda op: _true_fun(op[1])
index = lax.convert_element_type(pred, np.int32)
return lax.switch(index, [false_fun, true_fun], op)
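# A minimal usage sketch (the helper name below is illustrative only and is not
# referenced by any test): for a single operand, cond_via_switch is expected to
# agree with lax.cond.
def _cond_via_switch_example(x):
  take_true = lambda o: o + 1
  take_false = lambda o: o - 1
  via_cond = lax.cond(x > 0, take_true, take_false, x)
  via_switch = cond_via_switch(x > 0, take_true, take_false, x)
  return via_cond, via_switch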
COND_IMPLS = [
(lax.cond, 'cond'),
(cond_via_switch, 'switch'),
]
SCAN_IMPLS = [
(lax.scan, 'unroll1'),
(partial(lax.scan, unroll=2), 'unroll2'),
]
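# The second SCAN_IMPLS entry above exercises lax.scan's `unroll` option, which
# runs that many scan steps per compiled loop iteration; unroll=2 should be
# semantically identical to the default, only the lowering differs.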
def while_loop_reference(cond, body, carry):
while cond(carry):
carry = body(carry)
return carry
def scan_reference(f, init, xs):
carry = init
ys = []
for x in xs:
(carry, y) = f(carry, x)
ys.append(lax.reshape(y, (1,) + np.shape(y)))
ys = lax.concatenate(ys, 0)
return carry, ys
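# A small sketch (not exercised by the tests below) of the contract that
# scan_reference mirrors: the carry threads through f step by step while the
# per-step outputs are stacked along a new leading axis.
def _scan_reference_example():
  f = lambda carry, x: (carry + x, carry + x)  # running sum
  xs = jnp.arange(4.)
  carry, ys = lax.scan(f, 0., xs)                # carry == 6., ys == [0., 1., 3., 6.]
  carry_ref, ys_ref = scan_reference(f, 0., xs)  # same values, computed in Python
  return (carry, ys), (carry_ref, ys_ref)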
def high_precision_dot(a, b):
return lax.dot(a, b, precision=lax.Precision.HIGHEST)
def posify(matrix):
return high_precision_dot(matrix, matrix.T.conj())
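# posify(m) above returns m @ m.conj().T, which is Hermitian positive
# (semi)definite, so the Cholesky-based solves in the tests below are well
# defined.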
class LaxControlFlowTest(jtu.JaxTestCase):
def setUp(self):
super().setUp()
jax._src.lax.control_flow._initial_style_open_jaxpr.cache_clear()
jax._src.lax.control_flow._initial_style_jaxpr.cache_clear()
jax._src.lax.control_flow._initial_style_jaxprs_with_common_consts.cache_clear()
def testWhileWithTuple(self):
limit = 10
def loop_cond(state):
pos, _ = state
return lax.lt(pos, limit)
def loop_body(state):
pos, count = state
return (lax.add(pos, 1), lax.add(count, 1))
def loop(init):
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = api.jit(loop)
self.assertEqual(loop(2), limit - 2)
self.assertEqual(cloop(2), limit - 2)
self.assertEqual(cloop(2), limit - 2)
self.assertEqual(cloop(3), limit - 3)
def testWhileWithManyArgs(self):
nargs = 256
def loop_cond(state):
return lax.lt(state[0], 2)
def loop_body(state):
return tuple(lax.add(s, 1) for s in state)
_ = lax.while_loop(loop_cond, loop_body, (0,) * nargs)
def testNestedWhile(self):
def outer_loop(num): # pylint: disable=missing-docstring
def cond_fun(state):
num, i, _ = state
return lax.lt(i, num)
def body_fun(state):
num, i, count = state
return (num, lax.add(i, 1), inner_loop(i, count))
init_val = (num, 0, 0)
_, i, count = lax.while_loop(cond_fun, body_fun, init_val)
return (i, count)
def inner_loop(i, count): # pylint: disable=missing-docstring
def cond_fun(state):
i, j, _ = state
return lax.le(j, i)
def body_fun(state):
i, j, count = state
return (i, lax.add(j, 1), lax.add(count, 1))
init_val = (i, 0, count)
_, _, count = lax.while_loop(cond_fun, body_fun, init_val)
return count
cloop = api.jit(outer_loop)
self.assertEqual(outer_loop(3), (3, 6))
self.assertEqual(cloop(3), (3, 6))
self.assertEqual(cloop(3), (3, 6))
self.assertEqual(cloop(2), (2, 3))
self.assertEqual(cloop(4), (4, 10))
def testWhileWithClosure(self):
def loop(init, local_limit, inc):
def loop_cond(state):
pos, _ = state
return lax.lt(pos, local_limit)
def loop_body(state):
effect[0] = True
pos, count = state
return (lax.add(pos, 1), lax.add(count, inc))
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = api.jit(loop)
limit = 10
effect = [False]
self.assertEqual(loop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
self.assertEqual(cloop(3, limit, 1), limit - 3)
assert not effect[0]
def testWhileWithClosureJit(self):
def loop(init, local_limit, inc):
def loop_cond(state):
pos, _ = state
return lax.lt(pos, local_limit)
def loop_body(state):
effect[0] = True
pos, count = state
f = lambda pos, inc: (lax.add(pos, 1), lax.add(count, inc))
return api.jit(f)(pos, inc)
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = api.jit(loop)
limit = 10
effect = [False]
self.assertEqual(loop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
self.assertEqual(cloop(3, limit, 1), limit - 3)
assert not effect[0]
def testWhileTypeErrors(self):
"""Test typing error messages for while."""
tuple_treedef = tree_util.tree_structure((1., 1.))
leaf_treedef = tree_util.tree_structure(0.)
with self.assertRaisesRegex(TypeError,
re.escape(f"cond_fun must return a boolean scalar, but got pytree {tuple_treedef}.")):
lax.while_loop(lambda c: (1., 1.), lambda c: c, 0.)
with self.assertRaisesRegex(TypeError,
re.escape("cond_fun must return a boolean scalar, but got output type(s) [ShapedArray(float32[])].")):
lax.while_loop(lambda c: np.float32(1.), lambda c: c, np.float32(0.))
with self.assertRaisesRegex(TypeError,
re.escape("body_fun output and input must have same type structure, "
f"got {tuple_treedef} and {leaf_treedef}.")):
lax.while_loop(lambda c: True, lambda c: (1., 1.), 0.)
with self.assertRaisesWithLiteralMatch(TypeError,
("body_fun output and input must have identical types, got\n"
"ShapedArray(bool[], weak_type=True)\n"
"and\n"
"ShapedArray(float32[]).")):
lax.while_loop(lambda c: True, lambda c: True, np.float32(0.))
def testNestedWhileWithDynamicUpdateSlice(self):
num = 5
def update_entry(arr, val, i, j):
val = lax.reshape(val, [1, 1])
return lax.dynamic_update_slice(arr, val, (i, j))
def outer_loop(arr): # pylint: disable=missing-docstring
def cond_fun(state):
i, num, _, _ = state
return lax.lt(i, num)
def body_fun(state):
i, num, arr, out = state
return (lax.add(i, 1), num, arr, inner_loop(i, arr, out))
out = np.zeros(arr.shape, dtype=arr.dtype)
init_val = (0, num, arr, out)
_, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)
return out
def inner_loop(i, arr, out): # pylint: disable=missing-docstring
def cond_fun(state):
i, j, _, _ = state
return lax.le(j, i)
def body_fun(state):
i, j, arr, out = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
arr_i_j = lax.dynamic_index_in_dim(arr_i, j, 0, False)
out = update_entry(out, arr_i_j, i, j)
return (i, lax.add(j, 1), arr, out)
init_val = (i, 0, arr, out)
_, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)
return out
cloop = api.jit(outer_loop)
arr = npr.RandomState(0).randn(5, 5)
self.assertAllClose(outer_loop(arr), np.tril(arr), check_dtypes=False)
self.assertAllClose(cloop(arr), np.tril(arr), check_dtypes=False)
self.assertAllClose(cloop(arr), np.tril(arr), check_dtypes=False)
def testLoopWithConjunctionCondition(self):
def sum_first_n(arr, num): # pylint: disable=missing-docstring
def cond_fun(state):
arr, num, i, _ = state
return lax.bitwise_and(lax.lt(i, num), lax.lt(i, arr.shape[0]))
def body_fun(state):
arr, num, i, total = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, num, lax.add(i, 1), lax.add(total, arr_i))
init_val = (arr, num, 0, 0.)
_, _, _, total = lax.while_loop(cond_fun, body_fun, init_val)
return total
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testWhileLoopBatched(self):
def fun(x):
return lax.while_loop(lambda x: x < 3, lambda x: x + 2, x)
ans = api.vmap(fun)(np.array([0, 1, 2, 3]))
expected = np.array([4, 3, 4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
fun = api.jit(fun)
ans = api.vmap(fun)(np.array([0, 1, 2, 3]))
expected = np.array([4, 3, 4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopAxisIndexBatched(self):
def fun(x):
return lax.while_loop(lambda x: x < lax.axis_index('i'), lambda x: x + 2, x)
ans = api.vmap(fun, axis_name='i')(np.array([0, 0, 0, 0]))
expected = np.array([0, 2, 2, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
fun = api.jit(fun)
ans = api.vmap(fun, axis_name='i')(np.array([0, 0, 0, 0]))
expected = np.array([0, 2, 2, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopCondConstsBatched(self):
def fun(x, y):
return lax.while_loop(lambda x: x < y, lambda x: x + 2, x)
ans = api.vmap(fun, in_axes=(None, 0))(0, np.array([2, 3]))
expected = np.array([2, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopBodyConstsBatched(self):
def fun(x, y):
return lax.while_loop(lambda x: x < 3, lambda x: x + y, x)
ans = api.vmap(fun, in_axes=(None, 0))(0, jnp.array([2, 3]))
expected = np.array([4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopTupleBatched(self):
def cond_fun(loop_carry):
x, y = loop_carry
return x + y < 5
def body_fun(loop_carry):
x, y = loop_carry
x = x + 1
return x, y
def fun(x, y):
return lax.while_loop(cond_fun, body_fun, (x, y))
ans = api.vmap(fun)(np.array([0, 0]), np.array([1, 2]))
expected = (np.array([4, 3]), np.array([1, 2]))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_issue_3204(self):
# Error during XLA code generation for vmap of nested loops
def test(a, b):
val = 0
i = 0
j = 0
condfun_1 = lambda inp: inp[1] < a + 1
condfun_2 = lambda inp: inp[2] < b + 1
def bodyfun_1(inp):
val, i, j = inp
j = 0
def bodyfun_2(inp):
val, i, j = inp
val += i + j
j += 1
return (val, i, j)
result = lax.while_loop(condfun_2, bodyfun_2, (val, i, j))
val = result[0]
i += 1
return (val, i, j)
result = lax.while_loop(condfun_1, bodyfun_1, (val, i, j))
return result[0]
arr = np.arange(5)
vmap_test = api.vmap(test, (0, 0))
vmap_test(arr, arr)
def testForiLoopErrors(self):
"""Test typing error messages for while."""
with self.assertRaisesRegex(
TypeError, "arguments to fori_loop must have equal types"):
lax.fori_loop(np.int16(0), jnp.int32(10), (lambda i, c: c), jnp.float32(7))
def testForiLoopBatched(self):
def body_fun(i, loop_carry):
x, y = loop_carry
x = x + 1
y = y + 2
return x, y
def fun(x):
return lax.fori_loop(0, 10, body_fun, (x, 0))
ans = api.vmap(fun)(np.array([0, 1]))
expected = (np.array([10, 11]), np.array([20, 20]))
self.assertAllClose(ans, expected, check_dtypes=False)
def testForiLoopBatchedIssue1190(self):
cond_fun = lambda carry: carry[0] < 4
body_fun = lambda carry: (carry[0] + 1, carry[1] + 1)
f = lambda x: lax.while_loop(cond_fun, body_fun, (0, x))
jaxpr = api.make_jaxpr(api.vmap(f))(jnp.arange(3))
eqn = jaxpr.jaxpr.eqns[0]
self.assertIs(eqn.primitive, lax.while_p)
self.assertEqual(eqn.params['cond_jaxpr'].in_avals[0].shape, ())
def testForiLoopBasic(self):
def body_fun(i, tot):
return lax.add(tot, i)
def count(num):
return lax.fori_loop(0, num, body_fun, 0)
self.assertEqual(count(2), 1)
self.assertEqual(count(3), 3)
self.assertEqual(count(4), 6)
for args_maker in [lambda: [2], lambda: [3], lambda: [4]]:
self._CompileAndCheck(count, args_maker)
def testForiLoopClosure(self):
def count(num):
def body_fun(i, tot):
return lax.add(num, lax.add(tot, i))
return lax.fori_loop(0, num, body_fun, 0)
cfun = api.jit(count)
self.assertEqual(count(2), 1 + 2**2)
self.assertEqual(count(2), cfun(2))
self.assertEqual(count(3), 3 + 3**2)
self.assertEqual(count(3), cfun(3))
self.assertEqual(count(4), 6 + 4**2)
self.assertEqual(count(4), cfun(4))
def testForiLoopTupleState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, lax.add(total, arr_i))
init_val = (arr, 0.)
_, total = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun,
init_val)
return total
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testForiLoopDictState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total = state['arr'], state['total']
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return {'arr': arr, 'total': lax.add(total, arr_i)}
init_val = {'arr': arr, 'total': 0.}
out_val = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun, init_val)
return out_val['total']
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testForiLoopEmptyTupleInState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total, _ = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, lax.add(total, arr_i), ())
init_val = (arr, 0., ())
_, tot, _ = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun, init_val)
return tot
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testCond(self):
def fun(x):
if x < 3:
return (x, x)
else:
y = lax.mul(2, x)
return y, lax.mul(2, y)
@api.jit
def cfun(x):
def false_fun(x):
y = lax.mul(2, x)
return y, lax.mul(2, y)
return lax.cond(lax.lt(x, 3), lambda x: (x, x), false_fun, x)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(0), (0, 0))
self.assertEqual(fun(1), cfun(1))
self.assertEqual(fun(1), (1, 1))
self.assertEqual(fun(2), cfun(2))
self.assertEqual(fun(2), (2, 2))
self.assertEqual(fun(3), cfun(3))
self.assertEqual(fun(3), (6, 12))
self.assertEqual(fun(4), cfun(4))
self.assertEqual(fun(4), (8, 16))
def testSwitch(self):
def branch(x):
y = lax.mul(2, x)
return y, lax.mul(2, y)
branches = [lambda x: (x, x),
branch,
lambda x: (x, -x)]
def fun(x):
if x <= 0:
return branches[0](x)
elif x == 1:
return branches[1](x)
else:
return branches[2](x)
def cfun(x):
return lax.switch(x, branches, x)
self.assertEqual(fun(-1), cfun(-1))
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(1), cfun(1))
self.assertEqual(fun(2), cfun(2))
self.assertEqual(fun(3), cfun(3))
cfun = api.jit(cfun)
self.assertEqual(fun(-1), cfun(-1))
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(1), cfun(1))
self.assertEqual(fun(2), cfun(2))
self.assertEqual(fun(3), cfun(3))
def testSwitchResidualsMerge(self):
def get_conds(fun):
jaxpr = api.make_jaxpr(api.grad(fun))(0., 0)
return [eqn for eqn in jaxpr.jaxpr.eqns if eqn.primitive.name == 'cond']
def branch_invars_len(cond_eqn):
lens = [len(jaxpr.jaxpr.invars) for jaxpr in cond_eqn.params['branches']]
assert len(set(lens)) == 1
return lens[0]
def branch_outvars_len(cond_eqn):
lens = [len(jaxpr.jaxpr.outvars) for jaxpr in cond_eqn.params['branches']]
assert len(set(lens)) == 1
return lens[0]
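    # "Residuals" here are the intermediates the forward cond saves for the
    # backward cond; branches whose residuals have matching types are expected
    # to share output slots rather than each contributing their own.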
branches1 = [
lambda x: jnp.sin(x),
lambda x: jnp.cos(x)] # branch residuals overlap, should be reused
branches2 = branches1 + [
lambda x: jnp.sinh(x)] # another overlapping residual, expect reuse
branches3 = branches2 + [
lambda x: jnp.sin(x) + jnp.cos(x)] # requires one more residual slot
def fun1(x, i):
return lax.switch(i + 1, branches1, x)
def fun2(x, i):
return lax.switch(i + 1, branches2, x)
def fun3(x, i):
return lax.switch(i + 1, branches3, x)
fwd1, bwd1 = get_conds(fun1)
fwd2, bwd2 = get_conds(fun2)
fwd3, bwd3 = get_conds(fun3)
fwd1_num_out = branch_outvars_len(fwd1)
fwd2_num_out = branch_outvars_len(fwd2)
fwd3_num_out = branch_outvars_len(fwd3)
assert fwd1_num_out == fwd2_num_out
assert fwd3_num_out == fwd2_num_out + 1
bwd1_num_in = branch_invars_len(bwd1)
bwd2_num_in = branch_invars_len(bwd2)
bwd3_num_in = branch_invars_len(bwd3)
assert bwd1_num_in == bwd2_num_in
assert bwd3_num_in == bwd2_num_in + 1
def testOneBranchSwitch(self):
branch = lambda x: -x
f = lambda i, x: lax.switch(i, [branch], x)
x = 7.
self.assertEqual(f(-1, x), branch(x))
self.assertEqual(f(0, x), branch(x))
self.assertEqual(f(1, x), branch(x))
cf = api.jit(f)
self.assertEqual(cf(-1, x), branch(x))
self.assertEqual(cf(0, x), branch(x))
self.assertEqual(cf(1, x), branch(x))
cf = api.jit(f, static_argnums=0)
self.assertEqual(cf(-1, x), branch(x))
self.assertEqual(cf(0, x), branch(x))
self.assertEqual(cf(1, x), branch(x))
def testIssue1379(self):
def fun(pred):
return lax.cond(pred, lambda x: (True, x), lambda x: (False, x), pred)
@api.jit
def cfun(pred):
return fun(pred)
self.assertEqual(fun(0), cfun(0), (False,0))
self.assertEqual(fun(0.), cfun(0.), (False,0.))
self.assertEqual(fun(1), cfun(1), (True,1))
self.assertEqual(fun(1.), cfun(1.), (True,1.))
# test that proper errors are raised for wrong types
for pred in ["abc", [], [1,2]]:
for f in [fun, cfun]:
self.assertRaises(TypeError, f, pred)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testNestedCond(self, cond):
def fun(x):
if x < 2:
return lax.mul(2, x)
else:
if x < 5:
return lax.mul(3, x)
else:
return lax.mul(4, x)
@api.jit
def cfun(x):
return cond(
lax.lt(x, 2),
lambda x: lax.mul(2, x),
lambda x: cond(lax.lt(x, 5),
x, lambda x: lax.mul(3, x),
4, lambda y: lax.mul(y, x)),
x)
self.assertEqual(cfun(1), 2)
self.assertEqual(cfun(3), 9)
self.assertEqual(cfun(6), 24)
self.assertEqual(cfun(1), fun(1))
self.assertEqual(cfun(3), fun(3))
self.assertEqual(cfun(6), fun(6))
def testCondTypeErrors(self):
"""Test typing error messages for cond."""
with self.assertRaisesRegex(TypeError,
re.escape("Pred type must be either boolean or number, got <function")):
lax.cond(lambda x: True, lambda top: 2., lambda fop: 3., 1.)
with self.assertRaisesRegex(TypeError,
re.escape("Pred must be a scalar, got foo of type <class 'str'>")):
lax.cond("foo", lambda top: 2., lambda fop: 3., 1.)
with self.assertRaisesRegex(TypeError,
re.escape("Pred must be a scalar, got (1.0, 1.0) of type <class 'tuple'>")):
lax.cond((1., 1.), lambda top: 2., lambda fop: 3., 1.)
with self.assertRaisesRegex(TypeError,
re.escape("true_fun and false_fun output must have same type structure, "
f"got {tree_util.tree_structure(2.)} and {tree_util.tree_structure((3., 3.))}.")):
lax.cond(True, lambda top: 2., lambda fop: (3., 3.), 1.)
with self.assertRaisesRegex(
TypeError, textwrap.dedent(
r"""
true_fun and false_fun output must have identical types, got
ShapedArray\(float32\[1\]\)
and
ShapedArray\(float32\[\].*\).""").strip()):
lax.cond(True,
lambda top: jnp.array([1.], jnp.float32),
lambda fop: jnp.float32(1.),
1.)
def testSwitchErrors(self):
"""Test typing error messages for switch."""
with self.assertRaisesRegex(TypeError,
re.escape("Index type must be an integer, got <function")):
lax.switch(lambda x: True, [lambda _: 2., lambda _: 3.], 1.)
with self.assertRaisesRegex(TypeError,
re.escape("Index type must be an integer, got foo.")):
lax.switch("foo", [lambda _: 2., lambda _: 3.], 1.)
with self.assertRaisesRegex(TypeError,
re.escape("Branch index must be scalar, got (1.0, 1.0) of shape (2,).")):
lax.switch((1., 1.), [lambda _: 2., lambda _: 3.], 1.)
with self.assertRaisesRegex(ValueError,
re.escape("Empty branch sequence")):
lax.switch(0, [], 1.)
with self.assertRaisesRegex(TypeError,
re.escape("branch 0 and 1 outputs must have same type structure, "
f"got {tree_util.tree_structure(2.)} and {tree_util.tree_structure((3., 3.))}.")):
lax.switch(1, [lambda _: 2., lambda _: (3., 3.)], 1.)
with self.assertRaisesRegex(
TypeError, textwrap.dedent(
r"""
branch 0 and 1 outputs must have identical types, got
ShapedArray\(float32\[1\]\)
and
ShapedArray\(float32\[\].*\).""").strip()):
lax.switch(1, [lambda _: jnp.array([1.], jnp.float32),
lambda _: jnp.float32(1.)],
1.)
def testCondOneBranchConstant(self):
def fun(x):
if x < 3:
return 5.
else:
return x
@api.jit
def cfun(x):
return lax.cond(lax.lt(x, 3), lambda x: 5, lambda x: x, x)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(cfun(0), 5)
self.assertEqual(fun(4), cfun(4))
self.assertEqual(cfun(4), 4)
def testCondOneBranchConstantTuple(self):
def fun(x):
if x < 3:
return (1., 2., 3.)
else:
return (x, 2., 4.)
@api.jit
def cfun(x):
return lax.cond(lax.lt(x, 3),
lambda x: (1, 2., 3.),
lambda x: (x, 2., 4.),
x)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(cfun(0), (1, 2., 3.))
self.assertEqual(fun(4), cfun(4))
self.assertEqual(cfun(4), (4, 2., 4.))
def testCondBatched(self):
def fun(x, y, z):
pred = lax.lt(x, 3)
true_fun = lambda y: y
false_fun = lambda z: lax.neg(z)
return lax.cond(pred, y, true_fun, z, false_fun)
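    # If the predicate itself is batched, both branches must be evaluated and
    # the results combined with lax.select; if only the operands are batched,
    # the conditional can stay a single cond.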
# these cases stay as cond
x = jnp.array(2)
y = jnp.array([1, 2])
z = jnp.array([3, 4])
ans = api.vmap(fun, (None, 0, 0))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0)))(x, y, z)
expected = np.array([1, 2])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
x = jnp.array(4)
ans = api.vmap(fun, (None, 0, 0))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0)))(x, y, z)
expected = np.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
fun = api.jit(fun)
ans = api.vmap(fun, (None, 0, 0))(x, y, z)
expected = np.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
z = jnp.array(5)
ans = api.vmap(fun, (None, 0, None))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, None)))(x, y, z)
expected = np.array([-5, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
# these cases become select
x = jnp.array([2, 4])
ans = api.vmap(fun, (0, 0, None))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (0, 0, None)))(x, y, z)
expected = np.array([1, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
z = jnp.array([3, 4])
ans = api.vmap(fun)(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun))(x, y, z)
expected = np.array([1, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
def testSwitchBatched(self):
def fun(index, x, y, z):
branches = [lambda xyz: xyz[0],
lambda xyz: lax.neg(xyz[1]),
lambda xyz: lax.sign(xyz[2])]
return lax.switch(index, branches, (x, y, z))
# these cases stay as cond
x = jnp.array(0)
y = jnp.array([1, 2])
z = jnp.array([3, 4])
w = jnp.array(9)
ans = api.vmap(fun, (None, 0, 0, None))(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0, None)))(x, y, z, w)
expected = np.array([1, 2])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
x = jnp.array(1)
ans = api.vmap(fun, (None, 0, 0, None))(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0, None)))(x, y, z, w)
expected = np.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
fun = api.jit(fun)
ans = api.vmap(fun, (None, 0, 0, None))(x, y, z, w)
expected = np.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
z = jnp.array(5)
ans = api.vmap(fun, (None, 0, None, None))(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, None, None)))(x, y, z, w)
expected = np.array([-5, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
# these cases become select
x = jnp.array([0, 1])
ans = api.vmap(fun, (0, 0, None, None))(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun, (0, 0, None, None)))(x, y, z, w)
expected = np.array([1, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
z = jnp.array([3, 4])
w = jnp.array([9, 9])
ans = api.vmap(fun)(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun))(x, y, z, w)
expected = np.array([1, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
def testCondJVP(self):
def fun_ref(x):
if x < 3:
return (x, x)
else:
y = 2 * x
return y, 2 * y
def fun(x):
def false_fun(x):
y = 2 * x
return y, 2 * y
return lax.cond(x < 3, lambda x: (x, x), false_fun, x)
x = 3.14
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
x = 2.72
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
def testSwitchJVP(self):
def branch(x):
y = 2 * x
return y, 2 * y
branches = [lambda x: (x, x),
branch,
lambda x: (x, -x)]
def fun_ref(x):
idx = x // 1
if idx <= 0:
return branches[0](x)
elif idx == 1:
return branches[1](x)
else:
return branches[2](x)
def fun(x):
idx = lax.convert_element_type(x // 1, np.int32)
return lax.switch(idx, branches, x)
for x in [-0.7, 0.7, 1.7, 2.7, 3.7]:
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondJVP2(self, cond):
def fun_ref(x):
if x < 3:
return 2.
else:
return 2. * x
def fun(x):
return cond(x < 3, (), lambda _: 2., x, lambda x: 2. * x)
x = 3.14
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
x = 2.72
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
def testCondGrad(self):
def f_ref(x):
return 3. * x if x < 2 else jnp.sin(x)
def f(x):
return lax.cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
x = 2.14
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
x = 1.72
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
def testCondGradVmapNan(self):
eps = 1e-3
def safe1(x):
return lax.cond(x < eps, lambda _: eps, lambda _: jnp.sqrt(x), ())
out = api.grad(lambda x: api.vmap(safe1)(x).sum())(np.zeros(10))
self.assertFalse(np.isnan(out).any())
def testSwitchGrad(self):
branches = [lambda x: 3. * x,
lambda x: jnp.sin(x),
lambda x: -x]
def f_ref(x):
idx = x // 1
if idx <= 0:
return branches[0](x)
elif idx == 1:
return branches[1](x)
else:
return branches[2](x)
def f(x):
idx = lax.convert_element_type(x // 1, np.int32)
return lax.switch(idx, branches, x)
for x in [-0.7, 0.7, 1.7, 2.7, 3.7]:
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
def testSwitchGradWithWeakTypeMismatch(self): # issue #4696, PR #4896
dtype = jnp.ones(1).dtype
dtype = jnp.float32 if dtype == jnp.float32 else jnp.float64
branches = [
lambda x: x, # This preserves the weak type of x.
lambda x: x + dtype(1), # This strips the weak type of x.
]
def f_ref(x):
i = x.astype(jnp.int32)
return branches[i](x)
def f(x):
return lax.switch(x.astype(jnp.int32), branches, x)
for x in [0., 1.]:
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondGrad2(self, cond):
def f_ref(x):
z = jnp.array([1., 2.]) * x if x[0] < 2 else jnp.sin(x)
return z.sum()
def _f(x):
return cond(
x[0] < 2,
lambda x: jnp.array([1., 2.]) * x,
lambda x: jnp.sin(x),
x)
f = lambda x: api.jit(_f)(x).sum()
x = 2.14 * jnp.ones(2)
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
x = 1.72 * jnp.ones(2)
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"],
rtol={jnp.float32: 1e-2, jnp.float64: 2e-3})
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondGrad3(self, cond):
def fun_ref(x):
if x < 3:
return 2.
else:
return 2. * x
def fun(x):
return cond(x < 3, (), lambda _: 2., x, lambda x: 2. * x)
x = 3.14
ans = api.grad(fun)(x)
expected = api.grad(fun_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd", "rev"])
x = 2.72
ans = api.grad(fun)(x)
expected = api.grad(fun_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd", "rev"])
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondGrad4(self, cond):
def fun_ref(x, y):
if x < 3:
return 2. * jnp.sin(y)
else:
return 2. * jnp.cos(x)
def fun(x, y):
return cond(
x < 3,
(), lambda _: 2. * jnp.sin(y),
x, lambda x: 2. * x)
y = 5.8
x = 3.14
ans = api.grad(fun, 1)(x, y)
expected = api.grad(fun_ref, 1)(x, y)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x, y), order=2, modes=["fwd", "rev"])
x = 2.72
ans = api.grad(fun, 1)(x, y)
expected = api.grad(fun_ref, 1)(x, y)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x, y), order=2, modes=["fwd", "rev"])
def testCondLinearize(self):
def f(x):
return lax.cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
y, f_lin = api.linearize(f, 1.)
self.assertAllClose(y, 3., check_dtypes=False)
self.assertAllClose(f_lin(2.), 6., check_dtypes=False)
y, f_lin = api.linearize(f, 4.)
self.assertAllClose(y, jnp.sin(4.), check_dtypes=False)
self.assertAllClose(f_lin(2.), jnp.cos(4.) * 2., check_dtypes=False)
def testSwitchLinearize(self):
branches = [lambda x: 3. * x,
lambda x: jnp.sin(x),
lambda x: -x]
def f(x):
idx = lax.convert_element_type(x // 1, np.int32)
return lax.switch(idx, branches, x)
# branch 0
y, f_lin = api.linearize(f, -1.)
self.assertAllClose(y, -3., check_dtypes=False)
self.assertAllClose(f_lin(2.), 6., check_dtypes=False)
y, f_lin = api.linearize(f, 0.)
self.assertAllClose(y, 0., check_dtypes=False)
self.assertAllClose(f_lin(2.), 6., check_dtypes=False)
# branch 1
y, f_lin = api.linearize(f, 1.)
self.assertAllClose(y, jnp.sin(1.), check_dtypes=False)
self.assertAllClose(f_lin(2.), jnp.cos(1.) * 2., check_dtypes=False)
# branch 2
y, f_lin = api.linearize(f, 2.)
self.assertAllClose(y, -2., check_dtypes=False)
self.assertAllClose(f_lin(2.), -2., check_dtypes=False)
y, f_lin = api.linearize(f, 3.)
self.assertAllClose(y, -3., check_dtypes=False)
self.assertAllClose(f_lin(2.), -2., check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondLinearize2(self, cond):
def f_ref(x):
z = jnp.array([1., 2.]) * x if x[0] < 2 else jnp.cos(jnp.sin(x))
return z.sum()
def f(x):
return cond(
x[0] < 2,
lambda x: jnp.array([1., 2.]) * x,
lambda x: jnp.cos(jnp.sin(x)),
x).sum()
x = 2.14 * jnp.ones(2)
y, f_lin = api.linearize(f, x)
y_ref, f_lin_ref = api.linearize(f_ref, x)
self.assertAllClose(y, y_ref, check_dtypes=False)
self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)
x = -2.14 * jnp.ones(2)
y, f_lin = api.linearize(f, x)
y_ref, f_lin_ref = api.linearize(f_ref, x)
self.assertAllClose(y, y_ref, check_dtypes=False)
self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)
f = api.jit(f)
x = 2.14 * jnp.ones(2)
y, f_lin = api.linearize(f, x)
y_ref, f_lin_ref = api.linearize(f_ref, x)
self.assertAllClose(y, y_ref, check_dtypes=False)
self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)
def testCondJit(self):
def f(x):
return lax.cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
y = api.jit(f)(1.)
expected = f(1.)
self.assertAllClose(y, expected, check_dtypes=False)
y = api.jit(f)(4.)
expected = f(4.)
self.assertAllClose(y, expected, check_dtypes=False)
def testSwitchJit(self):
branches = [lambda x: 3. * x,
lambda x: jnp.sin(x),
lambda x: -x]
def f(x):
idx = lax.convert_element_type(x // 1, np.int32)
return lax.switch(idx, branches, x)
for x in [-1., 0., 1., 2., 3.]:
y = api.jit(f)(x)
expected = f(x)
self.assertAllClose(y, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondJitDisabled(self, cond):
def f_ref(x):
return 3. * x if x < 2 else jnp.sin(x)
def f(x):
return cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
with api.disable_jit():
y = f(1.)
expected = f_ref(1.)
self.assertAllClose(y, expected, check_dtypes=False)
with api.disable_jit():
y = api.jit(f)(1.)
expected = f(1.)
self.assertAllClose(y, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondWithConsts(self, cond):
def f(x):
return cond(x < 2,
lambda x: np.array([1., 2.]) * x,
lambda x: np.array([3., 4.]) * jnp.sin(x),
x)
def f_ref(x):
if x < 2:
return np.array([1., 2.]) * x
else:
return np.array([3., 4.]) * np.sin(x)
y = f(1.)
expected = f_ref(1.)
self.assertAllClose(y, expected, check_dtypes=False)
y = f(4.)
expected = f_ref(4.)
self.assertAllClose(y, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondJitWithConsts(self, cond):
def f(x):
return cond(x < 2,
lambda x: np.array([1., 2.]) * x,
lambda x: np.array([3., 4.]) * jnp.sin(x),
x)
y = api.jit(f)(1.)
expected = f(1.)
self.assertAllClose(y, expected, check_dtypes=False)
y = api.jit(f)(4.)
expected = f(4.)
self.assertAllClose(y, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondVmapGrad(self, cond):
# https://github.com/google/jax/issues/2264
def f_1(x): return x ** 2
def f_2(x): return x ** 3
def f(x): return cond(x > 0, f_1, f_2, x)
def g(x): return jnp.where(x > 0, f_1(x), f_2(x))
x = jnp.linspace(-1, 1, 20)
ans = api.vmap(api.grad(f))(x)
expected = api.vmap(api.grad(g))(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testIssue1263(self):
def f(rng, x):
cond = random.bernoulli(rng)
return lax.cond(cond, x, lambda x: x, jnp.abs(x) - 1., lambda x: x)
def body_fn(i, state):
rng, x = state
key, subkey = random.split(rng)
return key, f(subkey, x)
def g(rng, x):
return lax.fori_loop(0, 10, body_fn, (rng, x))
api.vmap(g)(random.split(random.PRNGKey(0), 3), jnp.ones((3, 4)))
def testIssue514(self):
# just check this doesn't crash
lax.cond(True,
(0, 0), lambda x: (x[0], 0),
(1, 1), lambda x: x)
def testIssue649(self):
from jax import lax
def body(x):
a, b = x
return (7, b + 1)
def cond(x):
a, b = x
return b < 10
out = lax.while_loop(cond, body, (33, 4))
self.assertEqual(out, (7, 10))
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_impl={}".format(
jit_scan, jit_f, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS)
def testScanImpl(self, jit_scan, jit_f, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_ = rng.randn(5, 3)
c = rng.randn(4)
ans = scan(f, c, as_)
expected = scan_reference(f, c, as_)
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_impl={}".format(
jit_scan, jit_f, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS)
def testScanJVP(self, jit_scan, jit_f, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_ = rng.randn(5, 3)
c = rng.randn(4)
ans = api.jvp( lambda c, as_: scan(f, c, as_), (c, as_), (c, as_))
expected = api.jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))
self.assertAllClose(ans, expected, check_dtypes=False,
rtol={np.float64: 1e-14, np.float32: 1e-5})
jtu.check_grads(partial(scan, f), (c, as_), order=2, modes=["fwd"])
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_impl={}".format(
jit_scan, jit_f, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS)
def testScanLinearize(self, jit_scan, jit_f, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_ = rng.randn(5, 3)
c = rng.randn(4)
ans = api.linearize(lambda c, as_: scan(f, c, as_), c, as_)[1](c, as_)
expected = api.linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)
self.assertAllClose(ans, expected, check_dtypes=False,
rtol={np.float64: 1e-14})
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_impl={}".format(
jit_scan, jit_f, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testScanGrad(self, jit_scan, jit_f, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.sum(jnp.sin(a)) + jnp.sum(jnp.sin(c)) + jnp.sum(jnp.sin(d))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_ = rng.randn(5, 3)
c = rng.randn(4)
ans = api.grad(lambda c, as_: list( scan(f, c, as_))[0].sum())(c, as_)
expected = api.grad(lambda c, as_: list(scan_reference(f, c, as_))[0].sum())(c, as_)
self.assertAllClose(ans, expected, check_dtypes=False,
rtol={np.float32: 2e-5, np.float64: 1e-13})
jtu.check_grads(partial(scan, f), (c, as_), order=2, modes=["rev"],
atol=1e-3, rtol=5e-3)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testScanRnn(self):
r = npr.RandomState(0)
n_in = 4
n_hid = 2
n_out = 1
length = 3
W_trans = r.randn(n_hid, n_hid + n_in).astype(jnp.float_)
W_out = r.randn(n_out, n_hid + n_in).astype(jnp.float_)
params = W_trans, W_out
inputs = r.randn(length, n_in).astype(jnp.float_)
targets = r.randn(length, n_out).astype(jnp.float_)
def step(params, state, input):
W_trans, W_out = params
stacked = jnp.concatenate([state, input])
output = jnp.tanh(jnp.dot(W_out, stacked))
next_state = jnp.tanh(jnp.dot(W_trans, stacked))
return next_state, output
def rnn(params, inputs):
init_state = jnp.zeros(n_hid)
_, outputs = lax.scan(partial(step, params), init_state, inputs)
return outputs
def loss(params, inputs, targets):
predictions = rnn(params, inputs)
return jnp.sum((predictions - targets)**2)
# evaluation doesn't crash
loss(params, inputs, targets)
# jvp evaluation doesn't crash
api.jvp(lambda params: loss(params, inputs, targets), (params,), (params,))
# jvp numerical check passes
jtu.check_grads(loss, (params, inputs, targets), order=2, modes=["fwd"],
rtol={np.float32: 2e-2, np.float64: 1e-6})
# linearize works
_, expected = api.jvp(loss, (params, inputs, targets),
(params, inputs, targets))
_, linfun = api.linearize(loss, params, inputs, targets)
ans = linfun(params, inputs, targets)
self.assertAllClose(ans, expected, check_dtypes=False)
# gradient evaluation doesn't crash
api.grad(loss)(params, inputs, targets)
# gradient check passes
jtu.check_grads(loss, (params, inputs, targets), order=2, rtol=2e-2)
# we can vmap to batch things
batch_size = 7
batched_inputs = r.randn(batch_size, length, n_in).astype(jnp.float_)
batched_targets = r.randn(batch_size, length, n_out).astype(jnp.float_)
batched_loss = api.vmap(lambda x, y: loss(params, x, y))
losses = batched_loss(batched_inputs, batched_targets)
expected = np.stack(list(map(lambda x, y: loss(params, x, y),
batched_inputs, batched_targets)))
self.assertAllClose(losses, expected, check_dtypes=False, rtol=1e-2)
def testIssue711(self):
# Tests reverse-mode differentiation through a scan for which the scanned
# function also involves reverse-mode differentiation.
# See https://github.com/google/jax/issues/711
def harmonic_bond(conf, params):
return jnp.sum(conf * params)
def minimize_structure(test_params):
energy_fn = partial(harmonic_bond, params=test_params)
def apply_carry(carry, _):
i, x = carry
new_x = x - 0.1 * api.grad(energy_fn)(x)
new_carry = (i+1, new_x)
return new_carry, _
x0 = jnp.array([1., 2., 3.])
carry_final, _ = lax.scan(apply_carry, (0, x0), jnp.zeros((75, 0)))
_, x_final = carry_final
return x_final
initial_params = 0.5
minimize_structure(initial_params) # doesn't crash
def loss(test_params):
x_final = minimize_structure(test_params)
return jnp.sum(jnp.sin(1.0 - x_final))
api.grad(loss)(0.25) # doesn't crash
def testIssue744(self):
Point = collections.namedtuple('Point', ['x', 'y'])
p0 = Point(x=jnp.array(1), y=jnp.array(2))
def plus_one(p, iter_idx):
return Point(p.x+1, p.y+1), iter_idx
self.assertRaisesRegex(
ValueError,
'scan got value with no leading axis to scan over.*',
lambda: lax.scan(plus_one, p0, list(range(5))))
def testScanTypeErrors(self):
"""Test typing error messages for scan."""
a = jnp.arange(5)
# Body output not a tuple
with self.assertRaisesRegex(TypeError,
re.escape("scan body output must be a pair, got ShapedArray(float32[]).")):
lax.scan(lambda c, x: np.float32(0.), 0, a)
with self.assertRaisesRegex(TypeError,
re.escape("scan carry output and input must have same type structure, "
f"got {tree_util.tree_structure((0, 0, 0,))} "
f"and {tree_util.tree_structure((1, (2, 3)))}")):
lax.scan(lambda c, x: ((0, 0, 0), x), (1, (2, 3)), a)
with self.assertRaisesRegex(TypeError,
re.escape("scan carry output and input must have same type structure, "
f"got {tree_util.tree_structure(a)} and {tree_util.tree_structure(None)}.")):
lax.scan(lambda c, x: (0, x), None, a)
with self.assertRaisesWithLiteralMatch(
TypeError,
"scan carry output and input must have identical types, got\n"
"ShapedArray(int32[])\n"
"and\n"
"ShapedArray(float32[])."):
lax.scan(lambda c, x: (np.int32(0), x), np.float32(1.0), a)
with self.assertRaisesRegex(TypeError,
re.escape("scan carry output and input must have same type structure, "
f"got {tree_util.tree_structure(a)} and {tree_util.tree_structure((1, 2))}.")):
lax.scan(lambda c, x: (0, x), (1, 2), a)
@parameterized.named_parameters(
{"testcase_name": "_{}".format(scan_name),
"scan": scan_impl}
for scan_impl, scan_name in SCAN_IMPLS)
def testScanHigherOrderDifferentiation(self, scan):
d = 0.75
def f(c, a):
b = jnp.sin(c * jnp.sum(jnp.cos(d * a)))
c = 0.9 * jnp.cos(d * jnp.sum(jnp.sin(c * a)))
return c, b
as_ = jnp.arange(6.).reshape((3, 2))
c = 1.
jtu.check_grads(lambda c, as_: scan(f, c, as_), (c, as_),
modes=["rev"], order=2, rtol={np.float32: 6e-3})
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_in_axes={}_impl={}".format(
jit_scan, jit_f, in_axes, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "in_axes": in_axes,
"scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS
for in_axes in itertools.product([None, 0, 1], [None, 0, 1, 2])
if in_axes != (None, None))
def testScanVmap(self, jit_scan, jit_f, in_axes, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_shape = [5, 3]
c_shape = [4]
c_bdim, as_bdim = in_axes
if c_bdim is not None:
c_shape.insert(c_bdim, 7)
if as_bdim is not None:
as_shape.insert(as_bdim, 7)
as_ = rng.randn(*as_shape)
c = rng.randn(*c_shape)
ans = api.vmap(lambda c, as_: scan(f, c, as_), in_axes)(c, as_)
expected = api.vmap(lambda c, as_: scan_reference(f, c, as_), in_axes)(c, as_)
self.assertAllClose(ans, expected, check_dtypes=False,
rtol=1e-5, atol=1e-5)
def testScanVmapTuples(self):
def f(c, a):
a1, a2 = a
c1, c2 = c
b = jnp.sum(jnp.cos(a1)) * jnp.sum(jnp.tan(c2 * a2))
c = c1 * jnp.sin(jnp.sum(a1 * a2)), c2 * jnp.cos(jnp.sum(a1))
return c, b
in_axes = (0, (1, 2))
r = np.random.RandomState(0)
as_ = (r.randn(3, 7), r.randn(3, 4, 7))
c = (r.randn(7, 2), r.randn(7))
expected_c_out, expected_bs = [], []
for i in range(7):
c_out, bs = lax.scan(f, (c[0][i], c[1][i]), (as_[0][:,i], as_[1][:,:,i]))
expected_c_out.append(c_out)
expected_bs.append(bs)
expected_c_out_0, expected_c_out_1 = unzip2(expected_c_out)
expected_c_out = (jnp.stack(expected_c_out_0), jnp.stack(expected_c_out_1))
expected_bs = jnp.stack(expected_bs)
expected = expected_c_out, expected_bs
ans = api.vmap(lambda c, as_: lax.scan(f, c, as_), in_axes)(c, as_)
self.assertAllClose(ans, expected, check_dtypes=False)
def testScanVmapFixpoint(self):
def f(carry_init):
def scan_body(c, x):
# The carry is a 4-tuple, the last element starts batched,
# and the carry is shifted left at each iteration.
return ((c[1], c[2], c[3], 0.), None)
return lax.scan(scan_body, (0., 1., 2., carry_init), jnp.zeros(2))
carry_init = jnp.array([3., 4., 5.])
carry_out, _ = api.vmap(f)(carry_init)
self.assertAllClose(carry_out[3], jnp.array([0., 0., 0.]), check_dtypes=False)
    self.assertAllClose(carry_out[2], jnp.array([0., 0., 0.]), check_dtypes=False)
# After two shifts, we get the carry_init
self.assertAllClose(carry_out[1], carry_init, check_dtypes=False)
    self.assertAllClose(carry_out[0], jnp.array([2., 2., 2.]), check_dtypes=False)
def testIssue757(self):
# code from https://github.com/google/jax/issues/757
def fn(a):
return jnp.cos(a)
def loop(val):
iterations = 10
def apply_carry(x, i):
return api.grad(fn, argnums=(0,))(x)[0], i
final_val, _ = lax.scan(
apply_carry,
val,
jnp.arange(iterations)
)
return final_val
arg = 0.5
api.jit(api.jacfwd(loop, argnums=(0,)))(arg) # doesn't crash
def testIssue804(self):
num_devices = xla_bridge.device_count()
f = partial(lax.scan, lambda c, x: (c + lax.psum(x, "i") , c), 0.)
api.pmap(f, axis_name="i")(jnp.ones((num_devices, 4))) # doesn't crash
def testMap(self):
f = lambda x: x ** 2
xs = jnp.arange(10)
expected = xs ** 2
actual = lax.map(f, xs)
self.assertAllClose(actual, expected)
def testMapEmpty(self):
# https://github.com/google/jax/issues/2412
ans = lax.map(lambda x: x * x, jnp.array([]))
expected = jnp.array([])
self.assertAllClose(ans, expected)
def testCaching(self):
def cond(x):
assert python_should_be_executing
return x < 5
def body(x):
assert python_should_be_executing
return x + 2
python_should_be_executing = True
lax.while_loop(cond, body, 0)
python_should_be_executing = False
lax.while_loop(cond, body, 0)
def testCaching2(self):
# This second caching test shows a different kind of caching that we haven't
# implemented (but could!), namely that Python functions that are distinct
# objects but are equivalent functions trigger cache hits. This kind of
# caching could be salient when using lambda functions with control flow:
#
# lax.while_loop(lambda x: x < 5, lambda x: x + 2, 0)
# lax.while_loop(lambda x: x < 5, lambda x: x + 2, 0)
#
# To get a cache hit on the second line we'd need to form a jaxpr and
# compare them for equality (including the literals on identity). We could
# implement that by adding a __hash__/__eq__ to core.Jaxpr and
# core.ClosedJaxpr (see #1221).
raise SkipTest("not implemented")
def cond(x):
assert python_should_be_executing
return x < 5
def body(x):
assert python_should_be_executing
return x + 2
python_should_be_executing = True
lax.while_loop(cond, body, 0)
def cond(x):
assert python_should_be_executing
return x < 5
def body(x):
assert python_should_be_executing
return x + 2
python_should_be_executing = False
lax.while_loop(cond, body, 0)
def testWhileCondConstant(self):
out = lax.while_loop(lambda _: False, lambda _: (), ()) # doesn't crash
self.assertEqual(out, ())
@parameterized.named_parameters(
{"testcase_name": "_jit_loop={}_jit_body={}_jit_cond={}".format(
jit_loop, jit_body, jit_cond),
"jit_loop": jit_loop, "jit_body": jit_body, "jit_cond": jit_cond}
for jit_loop in [False, True]
for jit_body in [False, True]
for jit_cond in [False, True])
def testWhileJVP(self, jit_loop=True, jit_body=False, jit_cond=True):
cond = lambda x: x[0, 2] <= 8
body = lambda x: x * x
if jit_cond:
cond = api.jit(cond)
if jit_body:
body = api.jit(body)
loop = partial(lax.while_loop, cond, body)
if jit_loop:
loop = api.jit(loop)
loop_ref = partial(while_loop_reference, cond, body)
x = jnp.arange(9.).reshape((3, 3))
ans = api.jvp(loop, (x,), (x,))
expected = api.jvp(loop_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(loop, (x,), order=2, modes=["fwd"])
def testWhileJVPViaForiLoop(self):
f = lambda x: lax.fori_loop(0, 3, lambda i, x: x * 2, x)
self.assertAllClose(f(2.), 16., check_dtypes=False)
self.assertAllClose(api.jvp(f, (2.,), (1.,)), (16., 8.), check_dtypes=False)
jtu.check_grads(f, (2.,), order=2, modes=["fwd"])
f = lambda x: lax.fori_loop(0, 3, lambda i, x: x * (i + 1), x)
self.assertAllClose(f(2.), 12., check_dtypes=False)
self.assertAllClose(api.jvp(f, (2.,), (1.,)), (12., 6.), check_dtypes=False)
jtu.check_grads(f, (2.,), order=2, modes=["fwd"])
def testWhileJVPWithGrowingNonzeroTangents(self):
rng = np.random.RandomState(0)
def cond(state):
i, x, y, z = state
return i < 2
def body(state):
i, x, y, z = state
y = x * x
z = y * y
return i + 1, x, y, z
y, z = rng.randn(2), rng.randn(2)
def loop(loop_impl, x):
return loop_impl(cond, body, (0, x, y, z))[1]
loop_lax = partial(loop, lax.while_loop)
loop_ref = partial(loop, while_loop_reference)
x = rng.randn(2)
ans = api.jvp(loop_lax, (x,), (x,))
expected = api.jvp(loop_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(loop_lax, (x,), order=2, modes=["fwd"])
@parameterized.named_parameters(
dict(testcase_name="_loop={}".format(loop), loop=loop)
for loop in ["while", "fori", "fori_inside_cond", "fori_inside_scan"])
def testWhileGradError(self, loop: str = "fori_inside_scan"):
# Raise error for vjp for loops
if loop == "while":
func = lambda x: lax.while_loop(lambda i: i < 5., lambda i: i + 1., x)
elif loop == "fori":
func = lambda x: lax.fori_loop(x, x + 2., lambda i, c: c, x)
elif loop == "fori_inside_jit":
func = api.jit(lambda x: lax.fori_loop(x, x + 2., lambda i, c: c, x))
elif loop == "fori_inside_cond":
func = lambda x: lax.cond(True, x,
lambda x: lax.fori_loop(x, x + 2., lambda i, c: c, x),
1., lambda x: x)
elif loop == "fori_inside_scan":
func = lambda x: lax.scan(lambda c, x: (lax.fori_loop(x, x + 2., lambda i, c1: c1 * c, x),
None),
x, np.ones(2))[0]
else:
assert False
with self.assertRaisesRegex(ValueError, "Reverse-mode differentiation does not work for lax.while_loop"):
api.grad(func)(1.)
api.linearize(func, 1.) # Linearization works
def testIssue1316(self):
def f(carry, _):
c, key = carry
key, _ = random.split(key)
return (c, key), ()
key = random.PRNGKey(0)
api.grad(lambda c: lax.scan(f, (c, key), np.ones(3))[0][0])(0.) # doesn't crash
def testIssue1361(self):
@api.jit
def jit_run_scan(x):
def fun(carry, _):
x, _ = carry
return (2 * x, 0.), None
(x, _), _ = lax.scan(fun, (x, 0.), jnp.arange(3))
return x
api.grad(lambda x: jit_run_scan(x))(0.) # doesn't crash
def test_custom_root_scalar(self):
def scalar_solve(f, y):
return y / f(1.0)
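    # tangent_solve is only ever handed a linear scalar function, so f(1.0)
    # recovers its coefficient and y / f(1.0) solves f(x) = y exactly.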
def binary_search(func, x0, low=0.0, high=100.0):
del x0 # unused
def cond(state):
low, high = state
midpoint = 0.5 * (low + high)
return (low < midpoint) & (midpoint < high)
def body(state):
low, high = state
midpoint = 0.5 * (low + high)
update_upper = func(midpoint) > 0
low = jnp.where(update_upper, low, midpoint)
high = jnp.where(update_upper, midpoint, high)
return (low, high)
solution, _ = lax.while_loop(cond, body, (low, high))
return solution
def sqrt_cubed(x, tangent_solve=scalar_solve):
f = lambda y: y ** 2 - x ** 3
return lax.custom_root(f, 0.0, binary_search, tangent_solve)
value, grad = api.value_and_grad(sqrt_cubed)(5.0)
self.assertAllClose(value, 5 ** 1.5, check_dtypes=False, rtol=1e-6)
self.assertAllClose(grad, api.grad(pow)(5.0, 1.5), check_dtypes=False,
rtol=1e-7)
jtu.check_grads(sqrt_cubed, (5.0,), order=2,
rtol={jnp.float32: 1e-2, jnp.float64: 1e-3})
inputs = jnp.array([4.0, 5.0])
results = api.vmap(sqrt_cubed)(inputs)
self.assertAllClose(results, inputs ** 1.5, check_dtypes=False)
results = api.jit(sqrt_cubed)(5.0)
self.assertAllClose(results, 5.0 ** 1.5, check_dtypes=False,
rtol={np.float64:1e-7})
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_root_vector_with_solve_closure(self):
def vector_solve(f, y):
return jnp.linalg.solve(api.jacobian(f)(y), y)
def linear_solve(a, b):
f = lambda y: high_precision_dot(a, y) - b
x0 = jnp.zeros_like(b)
solution = jnp.linalg.solve(a, b)
oracle = lambda func, x0: solution
return lax.custom_root(f, x0, oracle, vector_solve)
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
jtu.check_grads(linear_solve, (a, b), order=2,
atol={np.float32: 1e-2, np.float64: 1e-11})
actual = api.jit(linear_solve)(a, b)
expected = jnp.linalg.solve(a, b)
self.assertAllClose(expected, actual)
def test_custom_root_with_custom_linear_solve(self):
def linear_solve(a, b):
f = lambda x: high_precision_dot(a, x) - b
factors = jsp.linalg.cho_factor(a)
cho_solve = lambda f, b: jsp.linalg.cho_solve(factors, b)
def pos_def_solve(g, b):
return lax.custom_linear_solve(g, b, cho_solve, symmetric=True)
return lax.custom_root(f, b, cho_solve, pos_def_solve)
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
actual = linear_solve(high_precision_dot(a, a.T), b)
expected = jnp.linalg.solve(high_precision_dot(a, a.T), b)
self.assertAllClose(expected, actual)
actual = api.jit(linear_solve)(high_precision_dot(a, a.T), b)
expected = jnp.linalg.solve(high_precision_dot(a, a.T), b)
self.assertAllClose(expected, actual)
jtu.check_grads(lambda x, y: linear_solve(high_precision_dot(x, x.T), y),
(a, b), order=2, rtol={jnp.float32: 1e-2})
def test_custom_root_errors(self):
with self.assertRaisesRegex(TypeError, re.escape("f() output pytree")):
lax.custom_root(lambda x: (x, x), 0.0, lambda f, x: x, lambda f, x: x)
with self.assertRaisesRegex(TypeError, re.escape("solve() output pytree")):
lax.custom_root(lambda x: x, 0.0, lambda f, x: (x, x), lambda f, x: x)
def dummy_root_usage(x):
f = lambda y: x - y
return lax.custom_root(f, 0.0, lambda f, x: x, lambda f, x: (x, x))
with self.assertRaisesRegex(
TypeError, re.escape("tangent_solve() output pytree")):
api.jvp(dummy_root_usage, (0.0,), (0.0,))
@parameterized.named_parameters(
{"testcase_name": "nonsymmetric", "symmetric": False},
{"testcase_name": "symmetric", "symmetric": True},
)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve(self, symmetric):
def explicit_jacobian_solve(matvec, b):
return lax.stop_gradient(jnp.linalg.solve(api.jacobian(matvec)(b), b))
def matrix_free_solve(matvec, b):
return lax.custom_linear_solve(
matvec, b, explicit_jacobian_solve, explicit_jacobian_solve,
symmetric=symmetric)
def linear_solve(a, b):
return matrix_free_solve(partial(high_precision_dot, a), b)
rng = np.random.RandomState(0)
a = rng.randn(3, 3)
if symmetric:
a = a + a.T
b = rng.randn(3)
jtu.check_grads(linear_solve, (a, b), order=2, rtol=2e-3)
expected = jnp.linalg.solve(a, b)
actual = api.jit(linear_solve)(a, b)
self.assertAllClose(expected, actual)
c = rng.randn(3, 2)
expected = jnp.linalg.solve(a, c)
actual = api.vmap(linear_solve, (None, 1), 1)(a, c)
self.assertAllClose(expected, actual)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_zeros(self):
def explicit_jacobian_solve(matvec, b):
return lax.stop_gradient(jnp.linalg.solve(api.jacobian(matvec)(b), b))
def matrix_free_solve(matvec, b):
return lax.custom_linear_solve(matvec, b, explicit_jacobian_solve,
explicit_jacobian_solve)
def linear_solve(a, b):
return matrix_free_solve(partial(high_precision_dot, a), b)
rng = np.random.RandomState(0)
a = rng.randn(3, 3)
b = rng.randn(3)
jtu.check_grads(lambda x: linear_solve(x, b), (a,), order=2,
rtol={np.float32: 5e-3})
jtu.check_grads(lambda x: linear_solve(a, x), (b,), order=2,
rtol={np.float32: 5e-3})
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_iterative(self):
def richardson_iteration(matvec, b, omega=0.1, tolerance=1e-6):
# Equivalent to vanilla gradient descent:
# https://en.wikipedia.org/wiki/Modified_Richardson_iteration
def cond(x):
return jnp.linalg.norm(matvec(x) - b) > tolerance
def body(x):
return x + omega * (b - matvec(x))
return lax.while_loop(cond, body, b)
def matrix_free_solve(matvec, b):
return lax.custom_linear_solve(matvec, b, richardson_iteration,
richardson_iteration)
def build_and_solve(a, b):
# intentionally non-linear in a and b
matvec = partial(high_precision_dot, jnp.exp(a))
return matrix_free_solve(matvec, jnp.cos(b))
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
expected = jnp.linalg.solve(jnp.exp(a), jnp.cos(b))
actual = build_and_solve(a, b)
self.assertAllClose(expected, actual, atol=1e-5)
jtu.check_grads(build_and_solve, (a, b), atol=1e-5, order=2,
rtol={jnp.float32: 6e-2, jnp.float64: 2e-3})
# vmap across an empty dimension
jtu.check_grads(
api.vmap(build_and_solve), (a[None, :, :], b[None, :]),
atol=1e-5,
order=2,
rtol={jnp.float32: 6e-2, jnp.float64: 2e-3})
def test_custom_linear_solve_cholesky(self):
def positive_definite_solve(a, b):
factors = jsp.linalg.cho_factor(a)
def solve(matvec, x):
return jsp.linalg.cho_solve(factors, x)
matvec = partial(high_precision_dot, a)
return lax.custom_linear_solve(matvec, b, solve, symmetric=True)
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
expected = jnp.linalg.solve(np.asarray(posify(a)), b)
actual = positive_definite_solve(posify(a), b)
self.assertAllClose(expected, actual)
actual = api.jit(positive_definite_solve)(posify(a), b)
self.assertAllClose(expected, actual)
# numerical gradients are only well defined if ``a`` is guaranteed to be
# positive definite.
jtu.check_grads(
lambda x, y: positive_definite_solve(posify(x), y),
(a, b), order=2, rtol=1e-2)
def test_custom_linear_solve_complex(self):
def solve(a, b):
def solve(matvec, x):
return jsp.linalg.solve(a, x)
def tr_solve(matvec, x):
return jsp.linalg.solve(a.T, x)
matvec = partial(high_precision_dot, a)
return lax.custom_linear_solve(matvec, b, solve, tr_solve)
rng = np.random.RandomState(0)
a = 0.5 * rng.randn(2, 2) + 0.5j * rng.randn(2, 2)
b = 0.5 * rng.randn(2) + 0.5j * rng.randn(2)
jtu.check_grads(solve, (a, b), order=2, rtol=1e-2)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_lu(self):
def linear_solve(a, b):
a_factors = jsp.linalg.lu_factor(a)
at_factors = jsp.linalg.lu_factor(a.T)
def solve(matvec, x):
return jsp.linalg.lu_solve(a_factors, x)
def transpose_solve(vecmat, x):
return jsp.linalg.lu_solve(at_factors, x)
return lax.custom_linear_solve(
partial(high_precision_dot, a), b, solve, transpose_solve)
rng = np.random.RandomState(0)
a = rng.randn(3, 3)
b = rng.randn(3)
expected = jnp.linalg.solve(a, b)
actual = linear_solve(a, b)
self.assertAllClose(expected, actual)
jtu.check_grads(linear_solve, (a, b), order=2, rtol=2e-3)
# regression test for https://github.com/google/jax/issues/1536
jtu.check_grads(api.jit(linear_solve), (a, b), order=2,
rtol={np.float32: 2e-3})
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_without_transpose_solve(self):
def explicit_jacobian_solve(matvec, b):
return lax.stop_gradient(jnp.linalg.solve(api.jacobian(matvec)(b), b))
def loss(a, b):
matvec = partial(high_precision_dot, a)
x = lax.custom_linear_solve(matvec, b, explicit_jacobian_solve)
return jnp.sum(x)
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
jtu.check_grads(loss, (a, b), order=2, modes=['fwd'],
atol={np.float32: 2e-3, np.float64: 1e-11})
jtu.check_grads(api.vmap(loss), (a[None,:,:], b[None,:]), order=2,
modes=['fwd'], atol={np.float32: 2e-3, np.float64: 1e-11})
with self.assertRaisesRegex(TypeError, "transpose_solve required"):
api.grad(loss)(a, b)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_pytree(self):
"""Test custom linear solve with inputs and outputs that are pytrees."""
def unrolled_matvec(mat, x):
"""Apply a Python list of lists of scalars to a list of scalars."""
result = []
for i in range(len(mat)):
v = 0
for j in range(len(x)):
if mat[i][j] is not None:
v += mat[i][j] * x[j]
result.append(v)
return result
def unrolled_substitution_solve(matvec, b, lower_tri):
"""Solve a triangular unrolled system with fwd/back substitution."""
zero = jnp.zeros(())
one = jnp.ones(())
x = [zero for _ in b]
ordering = range(len(b)) if lower_tri else range(len(b) - 1, -1, -1)
for i in ordering:
residual = b[i] - matvec(x)[i]
diagonal = matvec([one if i == j else zero for j in range(len(b))])[i]
x[i] = residual / diagonal
return x
def custom_unrolled_lower_tri_solve(mat, b):
return lax.custom_linear_solve(
partial(unrolled_matvec, mat), b,
partial(unrolled_substitution_solve, lower_tri=True),
partial(unrolled_substitution_solve, lower_tri=False))
mat = [[1.0, None, None, None, None, None, None],
[1.0, 1.0, None, None, None, None, None],
[None, 1.0, 1.0, None, None, None, None],
[None, None, 1.0, 1.0, None, None, None],
[None, None, None, 1.0, 1.0, None, None],
[None, None, None, None, None, 2.0, None],
[None, None, None, None, None, 4.0, 3.0]]
rng = np.random.RandomState(0)
b = list(rng.randn(7))
# Non-batched
jtu.check_grads(custom_unrolled_lower_tri_solve, (mat, b), order=2,
rtol={jnp.float32: 2e-2})
# Batch one element of b (which, because of unrolling, should only affect
# the first block of outputs)
b_bat = list(b)
b_bat[3] = rng.randn(3)
jtu.check_grads(
api.vmap(
custom_unrolled_lower_tri_solve,
in_axes=(None, [None, None, None, 0, None, None, None]),
out_axes=[0, 0, 0, 0, 0, None, None]), (mat, b_bat),
order=2,
rtol={jnp.float32: 1e-2})
# Batch one element of mat (again only affecting first block)
mat[2][1] = rng.randn(3)
mat_axis_tree = [
[0 if i == 2 and j == 1 else None for j in range(7)] for i in range(7)
]
jtu.check_grads(
api.vmap(
custom_unrolled_lower_tri_solve,
in_axes=(mat_axis_tree, None),
out_axes=[0, 0, 0, 0, 0, None, None]), (mat, b),
order=2)
def test_custom_linear_solve_errors(self):
solve = lambda f, x: x
with self.assertRaisesRegex(TypeError, re.escape("matvec() output pytree")):
lax.custom_linear_solve(lambda x: [x], 1.0, solve, solve)
with self.assertRaisesRegex(TypeError, re.escape("solve() output pytree")):
lax.custom_linear_solve(lambda x: x, 1.0, lambda f, x: [x], solve)
with self.assertRaisesRegex(
TypeError, re.escape("transpose_solve() output pytree")):
lax.custom_linear_solve(lambda x: x, 1.0, solve, lambda f, x: [x])
with self.assertRaisesRegex(ValueError, re.escape("solve() output shapes")):
lax.custom_linear_solve(lambda x: x, 1.0, lambda f, x: jnp.ones(2), solve)
def bad_matvec_usage(a):
return lax.custom_linear_solve(
lambda x: a * jnp.ones(2), 1.0, solve, solve)
with self.assertRaisesRegex(ValueError, re.escape("matvec() output shapes")):
api.jvp(bad_matvec_usage, (1.0,), (1.0,))
def testIssue810(self):
def loss(A):
def step(x, i):
return jnp.matmul(A, x), None
init_x = jnp.zeros(A.shape[-1:])
last_x, _ = lax.scan(step, init_x, jnp.arange(10))
return jnp.sum(last_x)
A = jnp.zeros((3, 3))
# The second DUS was unnecessarily replicating A across time.
# We check XLA because _scan_impl is "underneath" the jaxpr language.
s = str(api.xla_computation(api.grad(loss))(A).as_hlo_text())
assert s.count("dynamic-update-slice(") < 2
def testScanLengthArg(self):
def arange(n):
return lax.scan(lambda c, _: (c + 1, c), 0, None, length=n)[1]
ans = arange(10)
expected = np.arange(10)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_while_loop_of_pmap(self):
# code from jsnoek@
def body(i, x):
result = api.pmap(lambda z: lax.psum(jnp.sin(z), 'i'), axis_name='i')(x)
return result + x
f_loop = lambda x: lax.fori_loop(0, 3, body, x) # noqa: F821
ans = f_loop(jnp.ones(api.device_count()))
del body, f_loop
def body2(i, x):
result = jnp.broadcast_to(jnp.sin(x).sum(), x.shape)
return result + x
g_loop = lambda x: lax.fori_loop(0, 3, body2, x)
expected = g_loop(jnp.ones(api.device_count()))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_while_loop_of_pmap_error_message(self):
def body(i, x):
result = api.pmap(lambda z: lax.psum(jnp.sin(z), 'i'), axis_name='i')(x)
return result + x
f_loop = lambda x: lax.fori_loop(0, 3, body, x)
too_big = 2 * api.device_count()
self.assertRaisesRegex(
ValueError,
re.escape(
"compiling a primitive computation `while` that requires {} "
"replicas, but only {} XLA devices are available on backend {}."
.format(too_big, api.device_count(), jtu.device_under_test())),
lambda: f_loop(jnp.ones(too_big)))
@parameterized.named_parameters(
{"testcase_name": "_{}".format(scan_name),
"scan": scan_impl}
for scan_impl, scan_name in SCAN_IMPLS)
def test_scan_reverse(self, scan):
def cumsum(x, reverse):
return scan(lambda c, x: (c + x, c + x), 0, x, reverse=reverse)[1]
x = np.array([3, 1, 4, 1, 5, 9])
self.assertAllClose(np.cumsum(x), cumsum(x, False), check_dtypes=False)
self.assertAllClose(np.cumsum(x[::-1])[::-1], cumsum(x, True), check_dtypes=False)
with api.disable_jit():
self.assertAllClose(np.cumsum(x), cumsum(x, False), check_dtypes=False)
with api.disable_jit():
self.assertAllClose(np.cumsum(x[::-1])[::-1], cumsum(x, True), check_dtypes=False)
def test_scan_unroll(self):
d = jnp.ones(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
xs = jnp.ones((5, 3))
c = jnp.ones(4)
scan = lambda c, xs: lax.scan(f, c, xs)
scan_unrolled = lambda c, xs: lax.scan(f, c, xs, unroll=2)
# jaxprs should be the same size
self.assertEqual(
len(str(api.make_jaxpr(scan)(c, xs))),
len(str(api.make_jaxpr(scan_unrolled)(c, xs))))
# but HLO should grow due to unrolling
self.assertLess(
len(str(api.xla_computation(scan)(c, xs).as_hlo_text())),
len(str(api.xla_computation(scan_unrolled)(c, xs).as_hlo_text())))
def test_disable_jit_cond_with_vmap(self):
# https://github.com/google/jax/issues/3093
def fn(t):
return lax.cond(t > 0, 0, lambda x: 0, 0, lambda x: 1)
fn = api.vmap(fn)
with api.disable_jit():
_ = fn(jnp.array([1])) # doesn't crash
def test_disable_jit_while_loop_with_vmap(self):
# https://github.com/google/jax/issues/2823
def trivial_while(y):
return lax.while_loop(lambda x: x < 10.0, lambda x: x + 1.0, y)
with api.disable_jit():
api.vmap(trivial_while)(jnp.array([3.0,4.0])) # doesn't crash
def test_vmaps_of_while_loop(self):
# https://github.com/google/jax/issues/3164
def f(x, n): return lax.fori_loop(0, n, lambda _, x: x + 1, x)
x, n = jnp.arange(3), jnp.arange(4)
api.vmap(api.vmap(f, (None, 0)), (0, None))(x, n) # doesn't crash
@parameterized.named_parameters(
{"testcase_name": f"_{shape}_axis={axis}",
"shape": shape, "axis": axis}
for shape in [
[0], [1], [2], [3], [5], [10], [1000],
[2, 3], [7, 5], [5, 6, 7]
]
for axis in range(-len(shape), len(shape) - 1))
def testAssociativeScanUnstructured(self, shape, axis):
data = np.arange(np.prod(shape)).reshape(shape) + 7
expected = np.cumsum(data, axis=axis)
result = lax.associative_scan(operator.add, data, axis=axis)
self.assertAllClose(result, expected, check_dtypes=False)
def testAssociativeScanUnstructured1000Reverse(self):
data = np.arange(1000) + 32
expected = np.cumsum(data[::-1])[::-1]
result = lax.associative_scan(operator.add, data, reverse=True)
self.assertAllClose(result, expected, check_dtypes=False)
def testAssociativeScanStructured3(self):
pair = collections.namedtuple('pair', ('first', 'second'))
data = pair(first=np.array([0., 1., 2.]),
second=np.array([0., 10., 20.]))
def fn(a, b):
return pair(first=a.first + b.first,
second=a.second + b.second)
result = lax.associative_scan(fn, elems=data)
self.assertAllClose(result.first, np.array([0., 1., 3.]),
check_dtypes=False)
self.assertAllClose(result.second, np.array([0., 10., 30.]),
check_dtypes=False)
def test_scan_typecheck_param(self):
d = jnp.ones(2)
def f(c, a):
b = jnp.cos(jnp.sum(a) + jnp.sum(c) + jnp.sum(d))
c = jnp.sin(c * b)
return c, b
xs = jnp.ones((5, 3))
c = jnp.ones(4)
scan_fun = lambda c, xs: lax.scan(f, c, xs)
def new_jaxpr():
jaxpr = api.make_jaxpr(scan_fun)(c, xs).jaxpr
scan = next(eqn for eqn in jaxpr.eqns if eqn.primitive.name == 'scan')
return jaxpr, scan
jaxpr, eqn = new_jaxpr()
eqn.params['reverse'] = 4
self.assertRaisesRegex(
core.JaxprTypeError,
re.escape('invalid scan param reverse of type int, bool required: 4'),
lambda: core.check_jaxpr(jaxpr))
jaxpr, eqn = new_jaxpr()
eqn.params['num_consts'] = -3
self.assertRaisesRegex(
core.JaxprTypeError,
re.escape('invalid scan param num_consts of type int, '
'non-negative int required: -3'),
lambda: core.check_jaxpr(jaxpr))
def test_cond_typecheck_param(self):
def new_jaxpr():
jaxpr = api.make_jaxpr(
lambda x: lax.switch(0, [jnp.sin, jnp.cos], x))(1.).jaxpr
cond = next(eqn for eqn in jaxpr.eqns if eqn.primitive.name == 'cond')
return jaxpr, cond
jaxpr, eqn = new_jaxpr()
eqn.params['branches'] = (4, 2)
self.assertRaisesRegex(
core.JaxprTypeError,
re.escape('invalid cond param branches of type tuple, '
'tuple of ClosedJaxpr required: (4, 2)'),
lambda: core.check_jaxpr(jaxpr))
jaxpr, eqn = new_jaxpr()
eqn.params['linear'] = (4, 2)
self.assertRaisesRegex(
core.JaxprTypeError,
re.escape('invalid cond param linear of type tuple, '
'tuple of bool required: (4, 2)'),
lambda: core.check_jaxpr(jaxpr))
jaxpr, eqn = new_jaxpr()
eqn.params['linear'] = 'multi\nline'
self.assertRaisesRegex(
core.JaxprTypeError,
r'invalid cond param linear of type str, '
r'tuple of bool required:\nmulti\nline',
lambda: core.check_jaxpr(jaxpr))
@parameterized.named_parameters(
{"testcase_name": f"_dtype={dtype.__name__}", "dtype": dtype}
for dtype in jtu.dtypes.all_integer)
def test_scan_init_weak_type(self, dtype):
def func(carry, x):
return carry + x, x
init_weak = 0 # Python scalars are weakly-typed.
x = jnp.ones(5, dtype=dtype)
carry, result = lax.scan(func, init_weak, x)
self.assertEqual(carry, x.sum())
self.assertArraysEqual(result, x)
@parameterized.named_parameters(
{"testcase_name": f"_dtype={dtype.__name__}", "dtype": dtype}
for dtype in jtu.dtypes.all_integer)
def test_while_loop_init_weak_type(self, dtype):
# This tests whether lax.while_loop can properly handle weakly-typed
# initial values.
def cond_fun(val):
return val < 2
def body_fun(val):
return val + increment
increment = jnp.array(1, dtype=dtype)
init_weak = 0 # Python scalars are weakly-typed.
result = lax.while_loop(cond_fun, body_fun, init_weak)
self.assertArraysEqual(result, jnp.full_like(increment, 2))
def test_scan_vjp_forwards_extensive_residuals(self):
# https://github.com/google/jax/issues/4510
def cumprod(x):
s = jnp.ones((2, 32), jnp.float32)
return lax.scan(lambda s, x: (x*s, s), s, x)
rng = np.random.RandomState(1234)
x = jnp.asarray(rng.randn(32, 2, 32).astype('float32'))
_, vjp_fun = api.vjp(cumprod, x)
# Need to spelunk into vjp_fun. This is fragile, and if it causes problems
# just skip this test.
*_, ext_res = vjp_fun.args[0].args[0]
self.assertIs(ext_res, x)
x = rng.randn(32, 2, 32).astype('float32') # numpy.ndarray, not DeviceArray
_, vjp_fun = api.vjp(cumprod, x)
*_, ext_res = vjp_fun.args[0].args[0]
self.assertIsInstance(ext_res, xla.DeviceArray)
def test_scan_vmap_collectives(self):
def scan_f(state, x):
s = lax.psum(state, 'i') * x
return state, s
def scan(state, xs):
return lax.scan(scan_f, state, xs)
scan_v = api.vmap(scan, in_axes=0, out_axes=0, axis_name='i')
self.assertAllClose(
scan_v(jnp.ones([1]), jnp.arange(5).reshape((1, 5))),
(jnp.array([1.]), jnp.array([[0., 1., 2., 3., 4.]])))
def test_xla_cpu_gpu_loop_cond_bug(self):
# https://github.com/google/jax/issues/5900
def deriv(f):
return lambda x, *args: jax.linearize(lambda x: f(x, *args), x)[1](1.0)
def _while_loop(cond_fun, body_fun, init_val, max_iter):
def _iter(val):
next_val = body_fun(val)
next_cond = True
return next_val, next_cond
def _fun(tup, _):
val, cond = tup
return jax.lax.cond(cond, _iter, lambda x: (x, False), val), _
init = (init_val, cond_fun(init_val))
return jax.lax.scan(_fun, init, None, length=max_iter)[0][0]
def my_pow(x, y):
def body_fun(val):
return val * x
def cond_fun(val):
return True
return _while_loop(cond_fun, body_fun, 1.0, y)
self.assertAllClose(deriv(my_pow)(3.0, 1), 1.0, check_dtypes=False)
def test_unexpected_tracer_error(self):
with self.assertRaisesRegex(core.UnexpectedTracerError,
"transformed by while_loop"):
lst = []
def side_effecting_body(val):
lst.append(val)
return val+1
lax.while_loop(lambda x: x < 2, side_effecting_body, 1)
lst[0] += 1
with self.assertRaisesRegex(core.UnexpectedTracerError,
"transformed by scan"):
lst = []
def side_effecting_scan(carry, val):
lst.append(val)
return carry, val+1
lax.scan(side_effecting_scan, None, jnp.ones((2, 2)))
lst[0] += 1
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| 33.255787 | 110 | 0.604788 | [
"ECL-2.0",
"Apache-2.0"
] | cdfreeman-google/jax | tests/lax_control_flow_test.py | 87,629 | Python |
import bpy
from bpy.props import BoolProperty, StringProperty
import os
from ..preferences import get_pref
class RSN_OT_CreatCompositorNode(bpy.types.Operator):
bl_idname = "rsn.creat_compositor_node"
bl_label = "Separate Passes"
use_passes: BoolProperty(default=False)
view_layer: StringProperty(default="")
def set_context_layer(self):
nt = bpy.context.scene.node_tree
context_layer = None
for node in bpy.context.scene.node_tree.nodes:
if node.name == f'RSN {bpy.context.window.view_layer.name} Render Layers':
context_layer = node
if not context_layer:
context_layer = nt.nodes.new(type="CompositorNodeRLayers")
context_layer.name = f'RSN {bpy.context.window.view_layer.name} Render Layers'
try:
com = bpy.context.scene.node_tree.nodes['Composite']
nt.links.new(context_layer.outputs[0], com.inputs[0])
except Exception as e:
self.report({"ERROR"}, 'No Composite Node Found(Check its name must be "Composite") ')
def execute(self, context):
scn = context.scene
scn.use_nodes = True
nt = context.scene.node_tree
self.set_context_layer()
try:
render_layer_node = nt.nodes[f'RSN {self.view_layer} Render Layers']
except:
render_layer_node = nt.nodes.new(type="CompositorNodeRLayers")
render_layer_node.name = f'RSN {self.view_layer} Render Layers'
if self.view_layer != '':
render_layer_node.layer = self.view_layer
try:
nt.nodes.remove(nt.nodes[f'RSN {self.view_layer} Output'])
except Exception as e:
pass
if self.use_passes:
file_output_node = nt.nodes.new(type="CompositorNodeOutputFile")
file_output_node.name = f"RSN {self.view_layer} Output"
file_output_node.label = f"RSN {self.view_layer} Output"
file_output_node.base_path = os.path.join(context.scene.render.filepath, self.view_layer)
file_output_node.location = (400, -300)
file_output_node.width = 200
file_output_node.hide = True
nt = context.scene.node_tree
pref = get_pref()
separator = pref.node_file_path.file_path_separator
for i, output in enumerate(render_layer_node.outputs):
name = output.name
output_name = f"{self.view_layer}{separator}{name}{separator}"
if output_name not in file_output_node.file_slots:
file_output_node.file_slots.new(name=output_name)
nt.links.new(render_layer_node.outputs[name], file_output_node.inputs[output_name])
return {"FINISHED"}
def register():
bpy.utils.register_class(RSN_OT_CreatCompositorNode)
def unregister():
bpy.utils.unregister_class(RSN_OT_CreatCompositorNode)
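# Invocation sketch (illustrative, not part of the original add-on): once registered,
# the operator can be called from scripts as
#   bpy.ops.rsn.creat_compositor_node(view_layer="View Layer", use_passes=True)
# which creates the Render Layers node for that view layer and, with use_passes,
# a File Output node wired to every render pass.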
| 35.047619 | 101 | 0.647758 | [
"Apache-2.0"
] | MarcoHoo/RenderStackNode | operators/compositor_nodetree.py | 2,944 | Python |
from tests import create_rand
def prepare_database_with_table(name: str, rows: list):
from peewee import IntegerField, Proxy, CharField, Model
from playhouse.sqlite_ext import CSqliteExtDatabase
db = Proxy()
db.initialize(CSqliteExtDatabase(':memory:', bloomfilter=True))
NameModel = type(name, (Model,), {
'id_': IntegerField(primary_key=True, column_name='id'),
'name': CharField(column_name='name')
})
table: Model = NameModel()
table.bind(db)
db.create_tables([NameModel])
for row in rows:
table.insert(row).execute()
return db
def test_ds_list():
from rand.providers.ds import RandDatasetBaseProvider, ListDatasetTarget
db = {
'names': [{'name': 'test1'}, {'name': 'test1'}],
'cities': [{'name': 'test2'}, {'name': 'test2'}],
}
ds = RandDatasetBaseProvider(prefix='ds', target=ListDatasetTarget(db=db))
rand = create_rand()
rand.register_provider(ds)
assert rand.gen('(:ds_get:)', ['names']) == ['test1']
assert rand.gen('(:ds_get_names:)-(:ds_get_cities:)') == ['test1-test2']
def test_ds_db():
from rand.providers.ds import RandDatasetBaseProvider, DBDatasetTarget
rows = [{'name': 'test'}, {'name': 'test'}]
db = prepare_database_with_table('names', rows)
ds = RandDatasetBaseProvider(prefix='ds', target=DBDatasetTarget(db=db))
rand = create_rand()
rand.register_provider(ds)
assert rand.gen('(:ds_get:)', ['names']) == ['test']
| 33.133333 | 78 | 0.652582 | [
"MIT"
] | kororo/rand | tests/test_ds.py | 1,491 | Python |
# from flask import Flask, Blueprint
# from flask_sqlalchemy import SQLAlchemy
# from flask_login import LoginManager
# import os
from flask import Flask, jsonify, request, make_response, redirect, url_for
import jwt
import datetime
import os
import re
from functools import wraps
from flask_sqlalchemy import SQLAlchemy
import uuid
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import secure_filename
from sqlalchemy import select
from flask_migrate import Migrate, migrate
from flask_cors import CORS
from sqlalchemy import inspect
from sqlalchemy import Table, Column, MetaData, Integer, Computed
from numpy import array
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secretollave'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///todo.db'
ABSOLUTE_PATH_TO_YOUR_FOLDER ='/home/dani/flask/static/fotosPerfil'
ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER ='/home/dani/flask/static/pdf'
CORS(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# Models
class Usuario(db.Model):
nick = db.Column(db.String(20), primary_key=True)
Nombre_de_usuario = db.Column(db.String(50))
password = db.Column(db.String(50))
e_mail = db.Column(db.String(50), unique=True, nullable=False)
descripcion = db.Column(db.String(1000))
link = db.Column(db.String(200))
foto_de_perfil = db.Column(db.String(400))
class Sigue(db.Model):
#id = db.Column(db.Integer, primary_key=True )
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Chat(db.Model):
#Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())
timestamp = db.Column(db.TIMESTAMP, nullable=False,
server_default=db.func.now(),
onupdate=db.func.now())
mensaje = db.Column(db.String(1000))
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Publicacion(db.Model):
id = db.Column(Integer,primary_key=True)
#id = db.Sequence('id', start=1, increment=1)
descripcion = db.Column(db.String(1000))
#Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())
timestamp = db.Column(db.TIMESTAMP, nullable=False,
server_default=db.func.now(),
onupdate=db.func.now())
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'))
class Propia(db.Model):
pdf = db.Column(db.String(400))
id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)
class Recomendacion(db.Model):
link = db.Column(db.String(200),nullable=False)
titulo = db.Column(db.String(200),nullable=False)
autor = db.Column(db.String(200),nullable=False)
id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)
class Tematica(db.Model):
tema = db.Column(db.String(50), primary_key=True )
class Notificaciones(db.Model):
id = db.Column(db.Integer, primary_key=True )
fecha = db.Column(db.Date)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Prefiere(db.Model):
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)
class Trata_pub_del_tema(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)
class Gusta(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Comenta(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
comentario = db.Column(db.String(1000))
class Guarda(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Trata(db.Model):
id_publi = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
id_notif = db.Column(db.String(20), db.ForeignKey('notificaciones.id'),primary_key=True)
class Genera(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
def token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
#token = request.args.get('token') #http://127.0.0.1:5000/route?token=djsnvidnoffofn
#data = request.get_json()
token = request.headers['token']
#token = data['token']
if not token:
return jsonify({'error': 'Token no existe'}), 403
try:
data = jwt.decode(token, app.config['SECRET_KEY'])
current_user = Usuario.query.filter_by(nick=data['nick']).first()
current_user = data['nick']
except:
return jsonify({'error': 'Token no valido'}), 403
return f(current_user,*args, **kwargs)
return decorated
@app.route('/unprotected')
def unprotected():
return jsonify({'message': 'Puede entrar tol mundo'})
@app.route('/protected')
@token_required
def protected(current_user):
print(current_user)
return jsonify({'message': 'Puedes entrar si puedes'})
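# Example request against the protected routes (illustrative sketch; host and port are
# assumptions taken from the commented-out URL above):
#   curl -H "token: <jwt returned by /login or /register>" http://127.0.0.1:5000/protected
# token_required reads the JWT from a request header literally named "token".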
# Registration route
@app.route('/register', methods=['POST'])
def add_data():
data= request.get_json()
#nick = request.form.get("nick")
#password = request.form.get("password")
#e_mail = request.form.get("e_mail")
user = Usuario.query.filter_by(e_mail=data['e_mail']).first()
nick = Usuario.query.filter_by(nick=data['nick']).first()
    if user:  # if this returns something, the e-mail already exists
        return jsonify({'error': 'Existe correo'})  # JSON error: e-mail already exists
if nick:
return jsonify({'error': 'Existe nick'})
#if (check_email(e_mail) == True and check_password(data['password']) == True ):
register = Usuario(nick=data['nick'],password=generate_password_hash(data['password']), e_mail=data['e_mail'],foto_de_perfil="platon.jpg")
db.session.add(register)
db.session.commit()
token = jwt.encode({'nick' : data['nick'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/login', methods=['POST'])
def login():
    # auth = request.authorization  # use this instead if handling HTTP basic auth
data= request.get_json()
if '@' in data['nickOcorreo']:
user = Usuario.query.filter_by(e_mail=data['nickOcorreo']).first()
else:
user = Usuario.query.filter_by(nick=data['nickOcorreo']).first()
if not user:
        return jsonify({'error': 'No existe ese usuario'})  # error: unknown user
    if not check_password_hash(user.password, data['password']):
        return jsonify({'error': 'Mal contraseña'})  # error: wrong password
token = jwt.encode({'nick' : data['nickOcorreo'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=9999999)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/editarPerfil', methods=['GET'])
@token_required
def editarPerfilget(current_user):
s = select([Usuario.Nombre_de_usuario, Usuario.descripcion,Usuario.link, Usuario.foto_de_perfil]).where((Usuario.nick == current_user))
result = db.session.execute(s)
seguidos= db.session.query(Sigue).filter(Sigue.Usuario_Nicka == current_user ).count()
seguidores= db.session.query(Sigue).filter(Sigue.Usuario_Nickb == current_user ).count()
nposts= db.session.query(Publicacion).filter(Publicacion.Usuario_Nicka == current_user ).count()
tema = select([Prefiere.tema]).where((Prefiere.Usuario_Nicka == current_user))
temas = db.session.execute(tema)
vector = []
for row in temas:
vector += row
for row in result:
fila = {
"nick": current_user,
"nombre_de_usuario":row[0],
"descripcion":row[1],
"link":row[2],
"foto_de_perfil": 'http://51.255.50.207:5000/display/' + row[3],
"nsiguiendo": seguidos,
"nseguidores": seguidores,
"nposts": nposts,
"tematicas": vector
#"foto_de_perfil" :url_for('static', filename='fotosPerfil/' + row[3])
}
return fila
@app.route('/display/<filename>')
def foto(filename):
return redirect(url_for('static', filename='fotosPerfil/' + filename),code = 301)
@app.route('/editarPerfil', methods=['POST'])
@token_required
def editarPerfilpost(current_user):
data= request.get_json()
user = Usuario.query.filter_by(nick=current_user).first()
user.Nombre_de_usuario = data['nombre_de_usuario']
print(data['nombre_de_usuario'])
print(data['descripcion'])
print(data['link'])
print(data['tematicas'])
user.descripcion = data['descripcion']
user.link = data['link']
tematicas = data['tematicas']
for temas in tematicas:
tema = Prefiere.query.filter_by(tema=temas).first()
if not tema:
tema = Prefiere(Usuario_Nicka=current_user, tema = temas)
db.session.add(tema)
#db.session.commit()
#cambia_foto
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/actualizarImagen', methods=['POST'])
@token_required
def actualizarImagen(current_user):
user = Usuario.query.filter_by(nick=current_user).first()
if request.files['nueva_foto'] is not None: #data['cambia_foto']:
file = request.files['nueva_foto']
print(request.files['nueva_foto'])
filename = secure_filename(file.filename)
file.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_FOLDER, filename))
user.foto_de_perfil = filename
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/subirPost', methods=['POST'])
@token_required
def subirPost(current_user):
data= request.get_json()
    publicacion = Publicacion(descripcion=data['descripcion'], Usuario_Nicka=current_user)  # commit below to obtain the generated id
db.session.add(publicacion)
db.session.commit()
tematicas = data['tematicas']
for temas in tematicas:
temita = Tematica.query.filter_by(tema=temas).first()
if temita:
nuevo = Trata_pub_del_tema(id=publicacion.id, tema = temita.tema)
db.session.add(nuevo)
db.session.commit()
    if (data['tipo'] == "1"):  # article
print("xd")
guardarPDF(request.files['pdf'], publicacion.id)
    elif (data['tipo'] == "2"):  # recommendation
recomendacion = Recomendacion(link=data['link'],titulo=data['titulo'], autor = data['autor'], id = publicacion.id)
db.session.add(recomendacion)
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
def guardarPDF(pdf, _id):
    # Fetch the Propia row for this publication, creating it if it does not exist yet
    # (the original looked it up and crashed on new posts because the row was missing).
    propia = Propia.query.filter_by(id=_id).first()
    if propia is None:
        propia = Propia(id=_id)
    if pdf is not None:
        filename = secure_filename(pdf.filename)
        pdf.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER, filename))
        propia.pdf = filename
        db.session.add(propia)
        db.session.commit()
@app.route('/getPostsPropios', methods=['GET'])
@token_required
def getPostsPropios(current_user):
    # Return the authenticated user's own posts, newest first. The original version
    # read a JSON body on a GET request, referenced an undefined "row" variable and
    # returned only the last post; this rewrite keeps the same response keys.
    user = Usuario.query.filter_by(nick=current_user).first()
    posts = Publicacion.query.filter_by(Usuario_Nicka=current_user).order_by(Publicacion.id.desc()).all()
    filas = []
    for r in posts:
        propia = Propia.query.filter_by(id=r.id).first()
        Gustas = db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == r.id).count()
        Comentarios = db.session.query(Comenta).filter(Comenta.Usuario_Nicka == current_user, Comenta.id == r.id).count()
        Guardados = db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == r.id).count()
        filas.append({
            "id": r.id,
            "nick": current_user,
            "descripcion": r.descripcion,
            "timestamp": str(r.timestamp),
            "pdf": 'http://51.255.50.207:5000/display2/' + propia.pdf if propia and propia.pdf else None,
            "nlikes": Gustas,
            "ncomentarios": Comentarios,
            "nguardados": Guardados,
            "usuario": user.Nombre_de_usuario if user else current_user
        })
    return jsonify(filas)
@app.route('/display2/<filename>')
def pdf(filename):
return redirect(url_for('static', filename='pdf/' + filename),code = 301)
@app.route('/getPostsRecomendados', methods=['GET'])
@token_required
def getPostsRecomendados(current_user):
#data= request.get_json()
a = select([Usuario.Nombre_de_usuario]).where((Usuario.nick == current_user))
resultb = db.session.execute(a)
Nombre_de_usuario = ""
for b in resultb:
Nombre_de_usuario=b.Nombre_de_usuario
#s = select([Publicacion.Usuario_Nicka, Publicacion.descripcion,Publicacion.timestamp]).where((Publicacion.Usuario_Nicka == current_user and Publicacion.id>data['id']-8 and and Publicacion.id<=data['id'])).order_by(Publicacion.id)
s = select([Publicacion]).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
results = db.session.execute(s)
# for record in results:
# print("\n", record)
    vector0 = []
    vector1 = []
    vector2 = []
    for r in results:
        print(str(r.id))
        # append one entry per post; the original used += on a numpy array / strings,
        # which silently produced empty or character-wise results
        vector0.append(r.id)
        vector1.append(r.descripcion)
        vector2.append(str(r.timestamp))
# for r in results:
# for b in resultb:
# a = select([Recomendacion.id, Recomendacion.link,Recomendacion.titulo,Recomendacion.autor]).where((Recomendacion.id == r.id))
# resulta = db.session.execute(a)
# for a in resultaa:
# Gustas= db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == r.id ).count()
# Comentarios= db.session.query(Comenta).filter(Comenta.Usuario_Nicka == current_user, Comenta.id == r.id ).count()
# Guardados= db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == r.id ).count()
print(vector0)
fila = {
"id": vector0,
#"link": a.link,
#"titulo": a.titulo,
#"autor": a.autor,
"nick": current_user,
"descripcion": vector1,
"timestamp": vector2,
#"nlikes": Gustas,
#"ncomentarios": Comentarios,
#"nguardados": Guardados,
"usuario": Nombre_de_usuario
}
return fila
def check_email(email):
regex = '^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
if(re.search(regex,email)):
return True
else:
return False
# Passwords must be between 8 and 32 characters.
def check_password(password):
    regex = r'^(?=.*[0-9])(?=.*[a-z])(?=.*[A-Z])(?=.*[*.!@$%^&(){}\[\]:;<>,.?/~_+\-=|]).{8,32}$'
if(re.search(regex,password)):
return True
else:
return False
if __name__ == '__main__':
app.run(debug=True)
| 33.831967 | 235 | 0.656208 | [
"MIT"
] | UNIZAR-30226-2022-09/back-end | .vscode-server/data/User/History/-1f47d17c/IWlp.py | 16,514 | Python |
"""A setuptools based setup module.
"""
# Always prefer setuptools over distutils
from setuptools import setup
setup(
name='ooinstall',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version="3.0.0",
description="Ansible wrapper for OpenShift Enterprise 3 installation.",
# The project's main homepage.
url="http://github.com/openshift/openshift-extras/tree/enterprise-3.0/oo-install",
# Author details
author="[email protected]",
author_email="OpenShift",
# Choose your license
license="Apache 2.0",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Topic :: Utilities',
],
# What does your project relate to?
keywords='oo-install setuptools development',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
#packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
packages=['ooinstall'],
package_dir={'ooinstall': 'src/ooinstall'},
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['click', 'PyYAML'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'ooinstall': ['ansible.cfg', 'ansible-quiet.cfg', 'ansible_plugins/*'],
},
tests_require=['nose'],
test_suite='nose.collector',
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'oo-install=ooinstall.cli_installer:cli',
],
},
)
| 32.790123 | 86 | 0.670557 | [
"Apache-2.0"
] | DennisPeriquet/origin-ci-tool | oct/ansible/openshift-ansible/utils/setup.py | 2,656 | Python |
import pytest
from . import specparser
def test_load() -> None:
spec = specparser.Spec.loads(
"""
[meta]
version = 0
[enum]
user_level = ["beginner", "intermediate", "advanced"]
[directive._parent]
content_type = "block"
options.foo = ["path", "uri"]
[directive.child]
inherit = "_parent"
argument_type = "user_level"
deprecated = true
[role._parent]
help = "test-role"
type = "text"
[role.child]
inherit = "_parent"
[rstobject._parent]
help = "test-rstobject"
[rstobject.child]
inherit = "_parent"
"""
)
assert spec.meta.version == 0
assert spec.enum["user_level"] == ["beginner", "intermediate", "advanced"]
assert spec.directive["child"] == specparser.Directive(
inherit="_parent",
example=None,
help=None,
content_type="block",
argument_type="user_level",
required_context=None,
deprecated=True,
domain=None,
options={"foo": [specparser.PrimitiveType.path, specparser.PrimitiveType.uri]},
name="child",
)
# Test these in the opposite order of the definition to ensure that each "type" of definition
# has a separate inheritance namespace
assert spec.rstobject["child"].help == "test-rstobject"
assert spec.role["child"].help == "test-role"
assert spec.role["child"].type == specparser.PrimitiveRoleType.text
validator = spec.get_validator(
[specparser.PrimitiveType.nonnegative_integer, "user_level"]
)
assert validator("10") == 10
assert validator("intermediate") == "intermediate"
with pytest.raises(ValueError):
validator("-10")
with pytest.raises(ValueError):
validator("foo")
def test_inheritance_cycle() -> None:
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 0
[directive.parent]
inherit = "child"
[directive.child]
inherit = "parent"
"""
)
def test_missing_parent() -> None:
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 0
[directive._parent]
content_type = "block"
[directive.child]
inherit = "parent"
"""
)
def test_bad_type() -> None:
spec = specparser.Spec.loads(
"""
[meta]
version = 0
"""
)
with pytest.raises(ValueError):
spec.get_validator("gjriojwe")
def test_bad_version() -> None:
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = -1"""
)
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 1"""
)
def test_bad_link() -> None:
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 0
[role."kotlin-sdk"]
type = {link = "https://docs.mongodb.com/realm-sdks/kotlin/latest/"}"""
)
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 0
[role."kotlin-sdk"]
type = {link = "https://docs.mongodb.com/realm-sdks/%s/kotlin/latest/%s"}"""
)
| 21.721854 | 97 | 0.57561 | [
"MIT"
] | will-riddy/Covid-Dashboard | .myenv/Lib/site-packages/snooty/test_specparser.py | 3,280 | Python |
# dagutil.py - dag utilities for mercurial
#
# Copyright 2010 Benoit Boissinot <[email protected]>
# and Peter Arrenbrecht <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import nullrev
from i18n import _
class basedag(object):
'''generic interface for DAGs
terms:
"ix" (short for index) identifies a nodes internally,
"id" identifies one externally.
All params are ixs unless explicitly suffixed otherwise.
Pluralized params are lists or sets.
'''
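    # Illustrative note (not part of the original interface): in the revlog-backed
    # implementations below an "ix" is a revision number and an "id" is a node hash,
    # so dag.externalize(rev) returns the hash stored in the revlog index and
    # dag.internalize(node_hash) maps it back to the revision number.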
def __init__(self):
self._inverse = None
def nodeset(self):
'''set of all node idxs'''
raise NotImplementedError()
def heads(self):
'''list of head ixs'''
raise NotImplementedError()
def parents(self, ix):
'''list of parents ixs of ix'''
raise NotImplementedError()
def inverse(self):
'''inverse DAG, where parents becomes children, etc.'''
raise NotImplementedError()
def ancestorset(self, starts, stops=None):
'''
set of all ancestors of starts (incl), but stop walk at stops (excl)
'''
raise NotImplementedError()
def descendantset(self, starts, stops=None):
'''
set of all descendants of starts (incl), but stop walk at stops (excl)
'''
return self.inverse().ancestorset(starts, stops)
def headsetofconnecteds(self, ixs):
'''
subset of connected list of ixs so that no node has a descendant in it
By "connected list" we mean that if an ancestor and a descendant are in
the list, then so is at least one path connecting them.
'''
raise NotImplementedError()
def externalize(self, ix):
'''return a list of (or set if given a set) of node ids'''
return self._externalize(ix)
def externalizeall(self, ixs):
'''return a list of (or set if given a set) of node ids'''
ids = self._externalizeall(ixs)
if isinstance(ixs, set):
return set(ids)
return list(ids)
def internalize(self, id):
'''return a list of (or set if given a set) of node ixs'''
return self._internalize(id)
def internalizeall(self, ids, filterunknown=False):
'''return a list of (or set if given a set) of node ids'''
ixs = self._internalizeall(ids, filterunknown)
if isinstance(ids, set):
return set(ixs)
return list(ixs)
class genericdag(basedag):
'''generic implementations for DAGs'''
def ancestorset(self, starts, stops=None):
stops = stops and set(stops) or set()
seen = set()
pending = list(starts)
while pending:
n = pending.pop()
if n not in seen and n not in stops:
seen.add(n)
pending.extend(self.parents(n))
return seen
def headsetofconnecteds(self, ixs):
hds = set(ixs)
if not hds:
return hds
for n in ixs:
for p in self.parents(n):
hds.discard(p)
assert hds
return hds
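    # Worked example (illustrative): for a small DAG where 1's parent is 0, 2's parent
    # is 1, and 3 is an unrelated root, headsetofconnecteds({0, 1, 2, 3}) discards 0
    # and 1 because each is the parent of another member, leaving {2, 3}.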
class revlogbaseddag(basedag):
'''generic dag interface to a revlog'''
def __init__(self, revlog, nodeset):
basedag.__init__(self)
self._revlog = revlog
self._heads = None
self._nodeset = nodeset
def nodeset(self):
return self._nodeset
def heads(self):
if self._heads is None:
self._heads = self._getheads()
return self._heads
def _externalize(self, ix):
return self._revlog.index[ix][7]
def _externalizeall(self, ixs):
idx = self._revlog.index
return [idx[i][7] for i in ixs]
def _internalize(self, id):
ix = self._revlog.rev(id)
if ix == nullrev:
raise LookupError(id, self._revlog.indexfile, _('nullid'))
return ix
def _internalizeall(self, ids, filterunknown):
rl = self._revlog
if filterunknown:
return [r for r in map(rl.nodemap.get, ids)
if r is not None and r != nullrev]
return map(self._internalize, ids)
class revlogdag(revlogbaseddag):
'''dag interface to a revlog'''
def __init__(self, revlog):
revlogbaseddag.__init__(self, revlog, set(xrange(len(revlog))))
def _getheads(self):
return [r for r in self._revlog.headrevs() if r != nullrev]
def parents(self, ix):
rlog = self._revlog
idx = rlog.index
revdata = idx[ix]
prev = revdata[5]
if prev != nullrev:
prev2 = revdata[6]
if prev2 == nullrev:
return [prev]
return [prev, prev2]
prev2 = revdata[6]
if prev2 != nullrev:
return [prev2]
return []
def inverse(self):
if self._inverse is None:
self._inverse = inverserevlogdag(self)
return self._inverse
def ancestorset(self, starts, stops=None):
rlog = self._revlog
idx = rlog.index
stops = stops and set(stops) or set()
seen = set()
pending = list(starts)
while pending:
rev = pending.pop()
if rev not in seen and rev not in stops:
seen.add(rev)
revdata = idx[rev]
for i in [5, 6]:
prev = revdata[i]
if prev != nullrev:
pending.append(prev)
return seen
def headsetofconnecteds(self, ixs):
if not ixs:
return set()
rlog = self._revlog
idx = rlog.index
headrevs = set(ixs)
for rev in ixs:
revdata = idx[rev]
for i in [5, 6]:
prev = revdata[i]
if prev != nullrev:
headrevs.discard(prev)
assert headrevs
return headrevs
def linearize(self, ixs):
'''linearize and topologically sort a list of revisions
The linearization process tries to create long runs of revs where
a child rev comes immediately after its first parent. This is done by
visiting the heads of the given revs in inverse topological order,
and for each visited rev, visiting its second parent, then its first
parent, then adding the rev itself to the output list.
'''
sorted = []
visit = list(self.headsetofconnecteds(ixs))
visit.sort(reverse=True)
finished = set()
while visit:
cur = visit.pop()
if cur < 0:
cur = -cur - 1
if cur not in finished:
sorted.append(cur)
finished.add(cur)
else:
visit.append(-cur - 1)
visit += [p for p in self.parents(cur)
if p in ixs and p not in finished]
assert len(sorted) == len(ixs)
return sorted
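    # Worked example (illustrative): take revs 0..3 where 1 and 2 both have parent 0
    # and 3 is a merge whose first parent is 1 and second parent is 2. The only head
    # of {0, 1, 2, 3} is 3, and the walk emits [0, 2, 1, 3]: every parent precedes its
    # children, and the first parent 1 lands immediately before its child 3.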
class inverserevlogdag(revlogbaseddag, genericdag):
'''inverse of an existing revlog dag; see revlogdag.inverse()'''
def __init__(self, orig):
revlogbaseddag.__init__(self, orig._revlog, orig._nodeset)
self._orig = orig
self._children = {}
self._roots = []
self._walkfrom = len(self._revlog) - 1
def _walkto(self, walkto):
rev = self._walkfrom
cs = self._children
roots = self._roots
idx = self._revlog.index
while rev >= walkto:
data = idx[rev]
isroot = True
for prev in [data[5], data[6]]: # parent revs
if prev != nullrev:
cs.setdefault(prev, []).append(rev)
isroot = False
if isroot:
roots.append(rev)
rev -= 1
self._walkfrom = rev
def _getheads(self):
self._walkto(nullrev)
return self._roots
def parents(self, ix):
if ix is None:
return []
if ix <= self._walkfrom:
self._walkto(ix)
return self._children.get(ix, [])
def inverse(self):
return self._orig
| 29.672662 | 79 | 0.564432 | [
"BSD-3-Clause"
] | l2dy/machg | LocalMercurial/mercurial/dagutil.py | 8,249 | Python |
import requests
import argparse
import asyncio
import json
import logging
import websockets
import os.path
from collections import namedtuple
from game import Game
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
wslogger = logging.getLogger('websockets')
wslogger.setLevel(logging.WARN)
logger = logging.getLogger('Server')
logger.setLevel(logging.INFO)
Player = namedtuple('Player', ['name', 'ws'])
class Game_server:
def __init__(self, mapfile, ghosts, level_ghosts, lives, timeout, grading=None):
self.game = Game(mapfile, ghosts, level_ghosts, lives, timeout)
self.game_properties = {'map': mapfile,
'n_ghosts': ghosts,
'l_ghosts': level_ghosts}
self.players = asyncio.Queue()
self.viewers = set()
self.current_player = None
self.grading = grading
async def incomming_handler(self, websocket, path):
try:
async for message in websocket:
data = json.loads(message)
if data["cmd"] == "join":
map_info = self.game.info()
await websocket.send(map_info)
if path == "/player":
logger.info("<%s> has joined", data["name"])
await self.players.put(Player(data["name"], websocket))
if path == "/viewer":
self.viewers.add(websocket)
if data["cmd"] == "key" and self.current_player.ws == websocket:
logger.debug((self.current_player.name, data))
self.game.keypress(data["key"][0])
except websockets.exceptions.ConnectionClosed as c:
logger.info("Client disconnected")
if websocket in self.viewers:
self.viewers.remove(websocket)
async def mainloop(self):
while True:
logger.info("Waiting for players")
self.current_player = await self.players.get()
if self.current_player.ws.closed:
logger.error("<{}> disconnect while waiting".format(self.current_player.name))
continue
try:
logger.info("Starting game for <{}>".format(self.current_player.name))
self.game.start(self.current_player.name)
if self.grading:
game_rec = dict(self.game_properties)
game_rec['player'] = self.current_player.name
while self.game.running:
await self.game.next_frame()
await self.current_player.ws.send(self.game.state)
if self.viewers:
await asyncio.wait([client.send(self.game.state) for client in self.viewers])
await self.current_player.ws.send(json.dumps({"score": self.game.score}))
logger.info("Disconnecting <{}>".format(self.current_player.name))
except websockets.exceptions.ConnectionClosed as c:
self.current_player = None
finally:
if self.grading:
game_rec['score'] = self.game.score
r = requests.post(self.grading, json=game_rec)
if self.current_player:
await self.current_player.ws.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--bind", help="IP address to bind to", default="")
parser.add_argument("--port", help="TCP port", type=int, default=8000)
parser.add_argument("--ghosts", help="Number of ghosts", type=int, default=1)
parser.add_argument("--level", help="difficulty level of ghosts", choices=['0','1','2','3'], default='1')
parser.add_argument("--lives", help="Number of lives", type=int, default=3)
parser.add_argument("--timeout", help="Timeout after this amount of steps", type=int, default=3000)
parser.add_argument("--map", help="path to the map bmp", default="data/map1.bmp")
parser.add_argument("--grading-server", help="url of grading server", default=None)
args = parser.parse_args()
g = Game_server(args.map, args.ghosts, int(args.level), args.lives, args.timeout, args.grading_server)
game_loop_task = asyncio.ensure_future(g.mainloop())
websocket_server = websockets.serve(g.incomming_handler, args.bind, args.port)
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(websocket_server, game_loop_task))
loop.close()
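# Minimal player-client sketch (illustrative; assumes the server above running on
# the default localhost:8000):
#   import asyncio, json, websockets
#   async def play():
#       async with websockets.connect("ws://localhost:8000/player") as ws:
#           await ws.send(json.dumps({"cmd": "join", "name": "bot"}))
#           map_info = json.loads(await ws.recv())   # game.info() sent on join
#           state = json.loads(await ws.recv())      # per-frame game state
#           await ws.send(json.dumps({"cmd": "key", "key": "a"}))
#   asyncio.get_event_loop().run_until_complete(play())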
| 42.018018 | 109 | 0.595197 | [
"MIT"
] | catarinaacsilva/pacman | server.py | 4,664 | Python |
import bs4
from selenium import webdriver
import sys
import time
import os
def getWFSlot(productUrl):
    driver = webdriver.Firefox()
    driver.get(productUrl)
    html = driver.page_source
    soup = bs4.BeautifulSoup(html, "html.parser")
    time.sleep(60)
    no_open_slots = True

    while no_open_slots:
        driver.refresh()
        print("refreshed")
        html = driver.page_source
        soup = bs4.BeautifulSoup(html, "html.parser")
        time.sleep(4)

        slot_patterns = ['Next available', '1-hour delivery windows', '2-hour delivery windows']
        try:
            next_slot_text = soup.find('h4', class_='ufss-slotgroup-heading-text a-text-normal').text
            # the heading indicates open slots if it contains any of the known patterns
            if any(slot_pattern in next_slot_text for slot_pattern in slot_patterns):
                print('SLOTS OPEN!')
                os.system('say "Slots for delivery opened!"')
                no_open_slots = False
                time.sleep(1400)
        except AttributeError:
            pass

        try:
            slot_opened_text = "Not available"
            all_dates = soup.findAll("div", {"class": "ufss-date-select-toggle-text-availability"})
            for each_date in all_dates:
                if slot_opened_text not in each_date.text:
                    print('SLOTS OPEN!')
                    os.system('say "Slots for delivery opened!"')
                    no_open_slots = False
                    time.sleep(1400)
        except AttributeError:
            pass

        try:
            no_slot_pattern = 'No delivery windows available. New windows are released throughout the day.'
            if no_slot_pattern == soup.find('h4', class_='a-alert-heading').text:
                print("NO SLOTS!")
        except AttributeError:
            print('SLOTS OPEN!')
            os.system('say "Slots for delivery opened!"')
            no_open_slots = False
getWFSlot('https://www.amazon.com/gp/buy/shipoptionselect/handlers/display.html?hasWorkingJavascript=1')
| 30.737705 | 104 | 0.6256 | [
"Apache-2.0"
] | NizZ8/Whole-Foods-Delivery-Slot | whole_foods_delivery_slot_firefox.py | 1,875 | Python |
class OrderedList:
def __init__(self, unique=False):
self.list = []
self.__unique = unique
def add(self, value):
i = 0
while (i < len(self.list)) and (self.list[i] < value):
i += 1
if self.__unique:
if len(self.list) == i or self.list[i] != value:
self.list.insert(i, value)
else:
self.list.insert(i, value)
def is_empty(self):
return (len(self.list) == 0)
def remove_min(self):
if len(self.list) == 0:
return None
return self.list.pop(0)
def remove_max(self):
if len(self.list) == 0:
return None
return self.list.pop()
def get_min(self):
if len(self.list) == 0:
return None
return self.list[0]
def get_max(self):
if len(self.list) == 0:
return None
return self.list[-1] | 25.081081 | 62 | 0.501078 | [
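# Usage sketch (illustrative, not part of the original module):
#   ol = OrderedList(unique=True)
#   for v in (3, 1, 3, 2):
#       ol.add(v)
#   ol.list           # [1, 2, 3] -- the duplicate 3 was skipped
#   ol.remove_min()   # 1
#   ol.get_max()      # 3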
"MIT"
] | epmcj/nextflix | src/client_py/olist.py | 928 | Python |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe_app.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe_app:ingredient-list')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicIngredientsAPITests(TestCase):
"""Test endpoints that don't require authentication."""
def setUp(self):
self.client = APIClient()
def test_login_required_to_view_ingredients(self):
"""Test that authentication is needed to view the ingredients."""
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsAPITests(TestCase):
"""Test endpoints that require authentication."""
def setUp(self):
self.client = APIClient()
self.user = create_user(
fname='Test',
lname='User',
email='[email protected]',
password='testpass'
)
self.client.force_authenticate(user=self.user)
def test_retrieve_ingredients_is_successful(self):
"""Test retrieve ingredients"""
Ingredient.objects.create(user=self.user, name='Carrot')
Ingredient.objects.create(user=self.user, name='Lemon')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_retrieved_ingredients_limited_to_user(self):
"""Tests that only the user's ingredients are retrieved"""
user2 = create_user(
fname='Test2',
lname='User2',
email='[email protected]',
password='test2pass'
)
Ingredient.objects.create(user=user2, name='Carrot')
ingredient = Ingredient.objects.create(user=self.user, name='Lemon')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_is_successful(self):
"""Test that creating a new ingredient is successful."""
payload = {
'name': 'Lemon'
}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_ingredient_with_invalid_details_invalid(self):
"""Test that ingredients is not created with invalid details"""
payload = {
'name': ''
}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
| 29.428571 | 76 | 0.667961 | [
"MIT"
] | oyekanmiayo/recipe-app-api | app/recipe_app/tests/test_ingredients_api.py | 3,090 | Python |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class lbvserver_appfwpolicy_binding(base_resource) :
"""Binding class showing the appfwpolicy that can be bound to lbvserver."""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._gotopriorityexpression = ""
self._sc = ""
self._bindpoint = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self._name = ""
self.___count = 0
@property
def priority(self) :
"""Priority."""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""Priority.
:param priority:
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE."""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
:param gotopriorityexpression:
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def policyname(self) :
"""Name of the policy bound to the LB vserver."""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""Name of the policy bound to the LB vserver.
:param policyname:
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def name(self) :
"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1
:param name:
"""
try :
self._name = name
except Exception as e:
raise e
@property
def bindpoint(self) :
"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE."""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE
:param bindpoint:
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def labeltype(self) :
"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel."""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel
:param labeltype:
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def labelname(self) :
"""Name of the label invoked."""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
"""Name of the label invoked.
:param labelname:
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def invoke(self) :
"""Invoke policies bound to a virtual server or policy label."""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
"""Invoke policies bound to a virtual server or policy label.
:param invoke:
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def sc(self) :
"""Use SureConnect on the virtual server.<br/>Default value: OFF<br/>Possible values = ON, OFF."""
try :
return self._sc
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(lbvserver_appfwpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbvserver_appfwpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
updateresource = lbvserver_appfwpolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.priority = resource.priority
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [lbvserver_appfwpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].priority = resource[i].priority
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
deleteresource = lbvserver_appfwpolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
deleteresource.priority = resource.priority
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [lbvserver_appfwpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].bindpoint = resource[i].bindpoint
deleteresources[i].priority = resource[i].priority
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
"""Use this API to fetch lbvserver_appfwpolicy_binding resources.
:param service:
:param name:
"""
try :
obj = lbvserver_appfwpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
"""Use this API to fetch filtered set of lbvserver_appfwpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param service:
:param name:
:param filter_:
"""
try :
obj = lbvserver_appfwpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
"""Use this API to count lbvserver_appfwpolicy_binding resources configued on NetScaler.
:param service:
:param name:
"""
try :
obj = lbvserver_appfwpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
"""Use this API to count the filtered set of lbvserver_appfwpolicy_binding resources.
        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
:param service:
:param name:
:param filter_:
"""
try :
obj = lbvserver_appfwpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Sc:
""" """
ON = "ON"
OFF = "OFF"
class Bindpoint:
""" """
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
""" """
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class lbvserver_appfwpolicy_binding_response(base_response) :
""" """
def __init__(self, length=1) :
self.lbvserver_appfwpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbvserver_appfwpolicy_binding = [lbvserver_appfwpolicy_binding() for _ in range(length)]
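# --- Editor's illustrative sketch (not part of the generated module): binding an
# application firewall policy to a load balancing vserver with the class above.
# `client` is assumed to be an authenticated nitro_service instance; all names
# below are placeholders, not values taken from the original code.
#
#     binding = lbvserver_appfwpolicy_binding()
#     binding.name = "lb_vserver_1"
#     binding.policyname = "appfw_policy_1"
#     binding.priority = 100
#     binding.bindpoint = lbvserver_appfwpolicy_binding.Bindpoint.REQUEST
#     lbvserver_appfwpolicy_binding.add(client, binding)
#     bound = lbvserver_appfwpolicy_binding.get(client, "lb_vserver_1")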
| 33.182039 | 308 | 0.58657 | [
"Apache-2.0"
] | HanseMerkur/nitro-python | nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py | 13,671 | Python |
#
# Autogenerated by Frugal Compiler (3.4.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
import asyncio
from datetime import timedelta
import inspect
from frugal.aio.processor import FBaseProcessor
from frugal.aio.processor import FProcessorFunction
from frugal.exceptions import TApplicationExceptionType
from frugal.exceptions import TTransportExceptionType
from frugal.middleware import Method
from frugal.transport import TMemoryOutputBuffer
from frugal.util.deprecate import deprecated
from thrift.Thrift import TApplicationException
from thrift.Thrift import TMessageType
from thrift.transport.TTransport import TTransportException
from . import f_BasePinger
from .ttypes import *
class Iface(f_BasePinger.Iface):
async def ping(self, ctx):
"""
Args:
ctx: FContext
"""
pass
class Client(f_BasePinger.Client, Iface):
def __init__(self, provider, middleware=None):
"""
Create a new Client with an FServiceProvider containing a transport
and protocol factory.
Args:
provider: FServiceProvider
middleware: ServiceMiddleware or list of ServiceMiddleware
"""
middleware = middleware or []
if middleware and not isinstance(middleware, list):
middleware = [middleware]
super(Client, self).__init__(provider, middleware=middleware)
middleware += provider.get_middleware()
self._methods.update({
'ping': Method(self._ping, middleware),
})
async def ping(self, ctx):
"""
Args:
ctx: FContext
"""
return await self._methods['ping']([ctx])
async def _ping(self, ctx):
memory_buffer = TMemoryOutputBuffer(self._transport.get_request_size_limit())
oprot = self._protocol_factory.get_protocol(memory_buffer)
oprot.write_request_headers(ctx)
oprot.writeMessageBegin('ping', TMessageType.CALL, 0)
args = ping_args()
args.write(oprot)
oprot.writeMessageEnd()
response_transport = await self._transport.request(ctx, memory_buffer.getvalue())
iprot = self._protocol_factory.get_protocol(response_transport)
iprot.read_response_headers(ctx)
_, mtype, _ = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
raise x
result = ping_result()
result.read(iprot)
iprot.readMessageEnd()
class Processor(f_BasePinger.Processor):
def __init__(self, handler, middleware=None):
"""
Create a new Processor.
Args:
handler: Iface
"""
if middleware and not isinstance(middleware, list):
middleware = [middleware]
super(Processor, self).__init__(handler, middleware=middleware)
self.add_to_processor_map('ping', _ping(Method(handler.ping, middleware), self.get_write_lock()))
class _ping(FProcessorFunction):
def __init__(self, handler, lock):
super(_ping, self).__init__(handler, lock)
async def process(self, ctx, iprot, oprot):
args = ping_args()
args.read(iprot)
iprot.readMessageEnd()
result = ping_result()
try:
ret = self._handler([ctx])
if inspect.iscoroutine(ret):
ret = await ret
except TApplicationException as ex:
async with self._lock:
_write_application_exception(ctx, oprot, "ping", exception=ex)
return
except Exception as e:
async with self._lock:
_write_application_exception(ctx, oprot, "ping", ex_code=TApplicationExceptionType.INTERNAL_ERROR, message=str(e))
raise
async with self._lock:
try:
oprot.write_response_headers(ctx)
oprot.writeMessageBegin('ping', TMessageType.REPLY, 0)
result.write(oprot)
oprot.writeMessageEnd()
oprot.get_transport().flush()
except TTransportException as e:
# catch a request too large error because the TMemoryOutputBuffer always throws that if too much data is written
if e.type == TTransportExceptionType.REQUEST_TOO_LARGE:
raise _write_application_exception(ctx, oprot, "ping", ex_code=TApplicationExceptionType.RESPONSE_TOO_LARGE, message=e.message)
else:
raise e
def _write_application_exception(ctx, oprot, method, ex_code=None, message=None, exception=None):
if exception is not None:
x = exception
else:
x = TApplicationException(type=ex_code, message=message)
oprot.write_response_headers(ctx)
oprot.writeMessageBegin(method, TMessageType.EXCEPTION, 0)
x.write(oprot)
oprot.writeMessageEnd()
oprot.get_transport().flush()
return x
class ping_args(object):
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('ping_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ping_result(object):
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('ping_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
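# --- Editor's illustrative sketch (not part of the generated file): a minimal
# handler for the Pinger service defined above. Wiring the Processor into a
# frugal server and building the FServiceProvider used by the Client are
# assumed to happen elsewhere.
#
#     class PingerHandler(Iface):
#         async def ping(self, ctx):
#             pass  # respond to the ping; a real handler would do useful work
#
#     processor = Processor(PingerHandler())
#     client = Client(provider)      # provider: an FServiceProvider
#     await client.ping(ctx)         # ctx: an FContext created by the caller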
| 31.599119 | 147 | 0.626516 | [
"Apache-2.0"
] | trevorackerman-wk/frugal | test/expected/python.asyncio/service_extension_same_file/f_Pinger.py | 7,173 | Python |
from __future__ import annotations
import argparse
import atexit
import itertools
import shlex
import shutil
import signal
import subprocess
import sys
import traceback
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from pyoomph import ast, ast2ir, ast_transformer, c_output, ir, parser
python_code_dir = Path(__file__).absolute().parent
project_root = python_code_dir.parent
class CompilationUnit:
ast: List[ast.ToplevelDeclaration]
def __init__(self, source_path: Path, session: c_output.Session):
self.source_path = source_path
self.session = session
def _handle_error(self) -> None:
traceback.print_exc()
print(f"\nThis happened while compiling {self.source_path}", file=sys.stderr)
sys.exit(1)
def create_untyped_ast(self) -> None:
try:
source_code = self.source_path.read_text(encoding="utf-8")
self.ast = ast_transformer.transform_file(
parser.parse_file(
source_code, self.source_path, project_root / "stdlib"
)
)
except Exception:
self._handle_error()
def create_c_code(self, exports: List[ir.Symbol]) -> None:
try:
the_ir = ast2ir.convert_program(self.ast, self.source_path, exports)
self.session.create_c_code(the_ir, self.source_path)
except Exception:
self._handle_error()
def get_c_compiler_command(c_paths: List[Path], exepath: Path) -> Tuple[List[str], str]:
compile_info = {}
with (project_root / "obj" / "compile_info.txt").open() as file:
for line in file:
key, value = line.rstrip("\n").split("=", maxsplit=1)
compile_info[key] = value
before_files = (
[compile_info["cc"]]
+ shlex.split(compile_info["cflags"])
+ [str(path) for path in project_root.glob("obj/*.o")]
)
after_files = (
["-o", str(exepath)]
+ shlex.split(compile_info["ldflags"])
+ ["-I", str(project_root)]
)
return (
before_files + [str(path) for path in c_paths] + after_files,
" ".join(
[shlex.quote(arg) for arg in before_files]
+ [f"<{len(c_paths)} files>"]
+ [shlex.quote(arg) for arg in after_files]
),
)
def run(command: List[str], verbose: bool, human_readable: Optional[str] = None) -> int:
if verbose:
if human_readable is None:
human_readable = " ".join(map(shlex.quote, command))
print("Running:", human_readable, file=sys.stderr)
return subprocess.run(command).returncode
def get_compilation_dir(parent_dir: Path, name_hint: str) -> Path:
for i in itertools.count():
path = parent_dir / (name_hint + str(i))
path.mkdir(parents=True, exist_ok=True)
try:
(path / "compiling").touch(exist_ok=False)
except FileExistsError:
# Another instance of oomph compiler running in parallel
continue
else:
atexit.register((path / "compiling").unlink)
return path
assert False # make mypy feel good
def compute_dependency_graph(
session: c_output.Session,
infile: Path,
verbose: bool,
) -> Dict[CompilationUnit, List[Path]]:
dependency_graph: Dict[CompilationUnit, List[Path]] = {}
queue = [infile]
while queue:
# Pop the next source file to parse
source_path = queue.pop()
if source_path in (unit.source_path for unit in dependency_graph.keys()):
continue
if verbose:
print("Parsing", source_path)
# Create a compilation unit out of it and parse it into an untyped ast
candidate_unit = CompilationUnit(source_path, session)
candidate_unit.create_untyped_ast()
# Calculate its dependencies and add them to the dependencies dictionary,
# including builtins if necessary, and add those dependencies to the queue
current_dependencies = [
top_declaration.path
for top_declaration in candidate_unit.ast
if isinstance(top_declaration, ast.Import)
]
if source_path != project_root / "builtins.oomph":
current_dependencies.append(project_root / "builtins.oomph")
dependency_graph[candidate_unit] = current_dependencies
queue.extend(current_dependencies)
return dependency_graph
def compute_compilation_order(
verbose: bool,
dependency_graph: Dict[CompilationUnit, List[Path]],
) -> List[CompilationUnit]:
compilation_order: List[CompilationUnit] = []
while len(compilation_order) < len(dependency_graph):
candidate_unit = next(
u for u in dependency_graph.keys() if u not in compilation_order
)
breadcrumbs = [candidate_unit]
while True:
uncompiled_dependencies = [
u
for u in dependency_graph.keys()
if u not in compilation_order
and u.source_path in dependency_graph[candidate_unit]
]
if not uncompiled_dependencies:
break
candidate_unit = uncompiled_dependencies[0]
if candidate_unit in breadcrumbs:
message = (
" --> ".join(d.source_path.name for d in breadcrumbs)
+ " --> "
+ candidate_unit.source_path.name
)
raise RuntimeError("cyclic imports: " + message)
breadcrumbs.append(candidate_unit)
compilation_order.append(candidate_unit)
return compilation_order
def main() -> None:
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("infile", type=Path)
arg_parser.add_argument("-o", "--outfile", type=Path)
arg_parser.add_argument("--valgrind", default="")
arg_parser.add_argument("-v", "--verbose", action="store_true")
compiler_args, program_args = arg_parser.parse_known_args()
try:
cache_dir = compiler_args.infile.parent / ".oomph-cache"
cache_dir.mkdir(exist_ok=True)
except OSError:
cache_dir = Path.cwd() / ".oomph-cache"
cache_dir.mkdir(exist_ok=True)
# Create a compiler session
session = c_output.Session(
get_compilation_dir(cache_dir, compiler_args.infile.stem + "_compilation")
)
# Calculate the dependency graph
dependency_graph = compute_dependency_graph(
session, compiler_args.infile.absolute(), compiler_args.verbose
)
# Calculate in which order we need to compile our units
compilation_order = compute_compilation_order(
compiler_args.verbose, dependency_graph
)
# Compile in the calculated order
for unit in compilation_order:
if compiler_args.verbose:
print("Creating C code:", unit.source_path)
unit.create_c_code(session.symbols)
# Write out everything and compile it
c_paths = session.write_everything(project_root / "builtins.oomph")
exe_path = session.compilation_dir / compiler_args.infile.stem
command, human_readable_command = get_c_compiler_command(c_paths, exe_path)
result = run(command, compiler_args.verbose, human_readable_command)
if result != 0:
sys.exit(result)
# If we have an outfile path, move the resulting executable to it and bail
if compiler_args.outfile is not None:
assert not compiler_args.outfile.is_dir() # shutil.move is weird for dirs
shutil.move(str(exe_path), str(compiler_args.outfile))
if compiler_args.verbose:
print("Moved executable to", compiler_args.outfile)
return
# Otherwise, run it directly
command = shlex.split(compiler_args.valgrind) + [str(exe_path)] + program_args
result = run(command, compiler_args.verbose)
if result < 0: # killed by signal
message = f"Program killed by signal {abs(result)}"
try:
message += f" ({signal.Signals(abs(result)).name})"
except ValueError: # e.g. SIGRTMIN + 1
pass
print(message, file=sys.stderr)
elif result > 0:
print(f"Program exited with status {result}", file=sys.stderr)
sys.exit(result)
main()
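# --- Editor's note (illustrative): the argparse setup above accepts an input
# file plus -o/--outfile, --valgrind and -v/--verbose, so a typical invocation
# is expected to look like the following. The example file name is a
# placeholder, and the project's C objects in obj/ are assumed to be built.
#
#     python3 -m pyoomph examples/hello.oomph -o hello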
| 35.088983 | 88 | 0.643521 | [
"MIT"
] | Akuli/oomph | pyoomph/__main__.py | 8,281 | Python |
import logging
from typing import List, Union
from cactus.consensus.block_record import BlockRecord
from cactus.consensus.blockchain_interface import BlockchainInterface
from cactus.consensus.constants import ConsensusConstants
from cactus.types.blockchain_format.sized_bytes import bytes32
from cactus.types.full_block import FullBlock
from cactus.types.header_block import HeaderBlock
from cactus.types.unfinished_block import UnfinishedBlock
from cactus.types.unfinished_header_block import UnfinishedHeaderBlock
from cactus.util.ints import uint64
log = logging.getLogger(__name__)
def final_eos_is_already_included(
header_block: Union[UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock],
blocks: BlockchainInterface,
sub_slot_iters: uint64,
) -> bool:
"""
Args:
header_block: An overflow block, with potentially missing information about the new sub slot
blocks: all blocks that have been included before header_block
sub_slot_iters: sub_slot_iters at the header_block
Returns: True iff the missing sub slot was already included in a previous block. Returns False if the sub
slot was not included yet, and therefore it is the responsibility of this block to include it
"""
if len(header_block.finished_sub_slots) > 0:
# We already have an included empty sub slot, which means the prev block is 2 sub slots behind.
return False
curr: BlockRecord = blocks.block_record(header_block.prev_header_hash)
# We also check if curr is close to header_block, which means it's in the same sub slot
seen_overflow_block = curr.overflow and (header_block.total_iters - curr.total_iters < sub_slot_iters // 2)
while not curr.first_in_sub_slot and not curr.height == 0:
if curr.overflow and header_block.total_iters - curr.total_iters < sub_slot_iters // 2:
seen_overflow_block = True
curr = blocks.block_record(curr.prev_hash)
if curr.first_in_sub_slot and seen_overflow_block:
# We have seen another overflow block in this slot (same as header_block), therefore there are no
# missing sub slots
return True
# We have not seen any overflow blocks, therefore header_block will have to include the missing sub slot in
# the future
return False
def get_block_challenge(
constants: ConsensusConstants,
header_block: Union[UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock],
blocks: BlockchainInterface,
genesis_block: bool,
overflow: bool,
skip_overflow_last_ss_validation: bool,
):
if len(header_block.finished_sub_slots) > 0:
if overflow:
# New sub-slot with overflow block
if skip_overflow_last_ss_validation:
# In this case, we are missing the final sub-slot bundle (it's not finished yet), however
# There is a whole empty slot before this block is infused
challenge: bytes32 = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
else:
challenge = header_block.finished_sub_slots[
-1
].challenge_chain.challenge_chain_end_of_slot_vdf.challenge
else:
# No overflow, new slot with a new challenge
challenge = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
else:
if genesis_block:
challenge = constants.GENESIS_CHALLENGE
else:
if overflow:
if skip_overflow_last_ss_validation:
# Overflow infusion without the new slot, so get the last challenge
challenges_to_look_for = 1
else:
# Overflow infusion, so get the second to last challenge. skip_overflow_last_ss_validation is False,
# Which means no sub slots are omitted
challenges_to_look_for = 2
else:
challenges_to_look_for = 1
reversed_challenge_hashes: List[bytes32] = []
curr: BlockRecord = blocks.block_record(header_block.prev_header_hash)
while len(reversed_challenge_hashes) < challenges_to_look_for:
if curr.first_in_sub_slot:
assert curr.finished_challenge_slot_hashes is not None
reversed_challenge_hashes += reversed(curr.finished_challenge_slot_hashes)
if curr.height == 0:
assert curr.finished_challenge_slot_hashes is not None
assert len(curr.finished_challenge_slot_hashes) > 0
break
curr = blocks.block_record(curr.prev_hash)
challenge = reversed_challenge_hashes[challenges_to_look_for - 1]
return challenge
| 46.543689 | 120 | 0.695244 | [
"Apache-2.0"
] | Cactus-Network/cactus-blockchain | cactus/consensus/get_block_challenge.py | 4,794 | Python |
# -*- coding: utf-8 -*-
from functools import lru_cache
import requests
from requests.packages.urllib3.util.retry import Retry
# https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#module-urllib3.util.retry
DEFAULT_RETRIES = 5
DEFAULT_BACKOFF_FACTOR = 0.1
DEFAULT_STATUS_FORCELIST = [500, 502, 503, 504]
@lru_cache(maxsize=None)
def get_session(name, concurrency=50):
session = requests.Session()
retry = Retry(
total=DEFAULT_RETRIES,
backoff_factor=DEFAULT_BACKOFF_FACTOR,
status_forcelist=DEFAULT_STATUS_FORCELIST,
)
# Default HTTPAdapter uses 10 connections. Mount custom adapter to increase
# that limit. Connections are established as needed, so using a large value
# should not negatively impact performance.
http_adapter = requests.adapters.HTTPAdapter(
pool_connections=concurrency, pool_maxsize=concurrency, max_retries=retry
)
session.mount("https://", http_adapter)
session.mount("http://", http_adapter)
return session
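# --- Editor's illustrative sketch (not part of the original module): minimal
# usage of the cached session factory above. The URL is a placeholder.
if __name__ == "__main__":
    # The session is cached per (name, concurrency) pair by lru_cache, so
    # repeated calls with the same arguments return the same object.
    session = get_session("example", concurrency=10)
    assert session is get_session("example", concurrency=10)
    response = session.get("https://example.com")
    print(response.status_code)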
| 31.242424 | 96 | 0.743938 | [
"MPL-2.0"
] | klahnakoski/pyLibrary | mozci/util/req.py | 1,031 | Python |
from werkzeug.wrappers import Request
from flask import Flask, redirect, url_for, request, flash
from flask_sqlalchemy import SQLAlchemy
import os
import requests
import random
from contact_form import ContactForm
from flask_dance.contrib.github import make_github_blueprint, github
from flask_dance.contrib.gitlab import make_gitlab_blueprint, gitlab
from discord_webhook import DiscordWebhook
import flask
from os import path
from flask_dance.consumer import oauth_authorized
app = Flask(__name__, template_folder="templates", static_folder='static')
# Various environment variables
app.secret_key = os.environ.get("FLASK_SECRET")
discord_url = os.environ.get("WEBHOOK")
FLASK_HOST = os.environ.get("FLASK_HOST")
app.config["GITHUB_OAUTH_CLIENT_ID"] = os.environ.get(
"REPOSI_GITHUB_CLIENT_ID")
app.config["GITHUB_OAUTH_CLIENT_SECRET"] = os.environ.get(
"REPOSI_GITHUB_SECRET")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
# Github blueprint
github_bp = make_github_blueprint()
github_bp.redirect_url = FLASK_HOST+"/docs"
app.register_blueprint(github_bp, url_prefix="/login")
app.config["GITLAB_OAUTH_CLIENT_ID"] = os.environ.get(
"REPOSI_GITLAB_ID")
app.config["GITLAB_OAUTH_CLIENT_SECRET"] = os.environ.get(
"REPOSI_GITLAB_SECRET")
gitlab_bp = make_gitlab_blueprint()
app.register_blueprint(gitlab_bp, url_prefix="/login")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
# Database model & connection
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///db.sqlite"
db = SQLAlchemy(app)
git_token = os.environ.get("GITHUB_TOKEN")
print(git_token)
@oauth_authorized.connect
def redirect_to_docs(blueprint, token):
blueprint.token = token
user = []
git_hash = []
resp = github.get("/user")
user = User.query.filter_by(username=resp.json()['login']).first()
if not user:
user = User(username=resp.json()['login'],
github_hash=str(random.getrandbits(128)))
db.session.add(user)
db.session.commit()
DiscordWebhook(url=discord_url, content=f"New user: {resp.json()['login']}. Check out profile at https://github.com/{resp.json()['login']}").execute()
git_hash = user.github_hash
return redirect(f"/docs?username={resp.json()['login']}&token={git_hash}")
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True, nullable=False)
github_hash = db.Column(db.String(80), unique=True, nullable=True)
# gitlab_hash = db.Column(db.String(80), unique=True, nullable=True)
def __repr__(self):
return '<User %r>' % self.username
if path.exists("db.sqlite") == True:
print("Database exists")
else:
print("Creating database")
db.create_all()
# Routing and repository parsing
@app.route("/signup")
def signup():
resp = github.get("/user")
if not github.authorized:
return redirect(url_for("github.login"))
print(resp)
assert resp.ok
user = User.query.filter_by(username=resp.json()['login']).first()
username = resp.json()['login']
github_hash = user.github_hash
return redirect(f"/docs?username={username}&token={github_hash}")
def parseGithubRepos(repos):
parsedRepos = []
displayForks = request.args.get('forks')
for repo in repos:
parsedRepo = {
'name': repo['full_name'],
'description': repo['description'],
'issues': repo['open_issues'],
'owner': repo['owner']['login'],
'stars': repo['stargazers_count'],
'forks': repo['forks_count'],
'url': repo['html_url'],
'size': repo['size'],
'language': repo['language']
}
if parsedRepo['description'] == None:
parsedRepo['description'] = "No description provided"
if displayForks == 'hidden':
if repo['fork'] == False:
parsedRepos.append(parsedRepo)
else:
parsedRepos.append(parsedRepo)
# if repo['fork'] == False: parsedRepos.append(parsedRepo)
parsedRepos.sort(key=lambda repo: repo["stars"], reverse=True)
return parsedRepos
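# --- Editor's note (illustrative): parseGithubRepos maps each GitHub API repo
# object onto the small dict consumed by widget.html, e.g.
#     {'name': 'octocat/Hello-World', 'description': 'My first repository',
#      'issues': 0, 'owner': 'octocat', 'stars': 80, 'forks': 9,
#      'url': 'https://github.com/octocat/Hello-World', 'size': 108,
#      'language': 'C'}
# and returns the list sorted by stars in descending order. The values shown
# above are placeholders, not real data.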
@app.route("/widget/<username>")
def thing(username):
token = request.args.get('token')
db.session.commit()
user = User.query.filter_by(username=username).first()
resp = {}
theme = request.args.get('theme')
if theme != 'dark': theme = 'light'
if user == None:
return "User not found"
else:
repos = []
if user.github_hash == token:
page = 1
resp = requests.get(
f"https://api.github.com/users/{username}/repos?per_page=100&page=1", auth=("Uzay-G", git_token)).json()
while resp != []:
print(resp, "\n\n\n")
repos += parseGithubRepos(resp)
page += 1
resp = requests.get(
f"https://api.github.com/users/{username}/repos?per_page=100&page={page}", auth=("Uzay-G", git_token)).json()
if type(resp) is dict:
return f'ERROR: {resp["message"]}'
return flask.render_template('widget.html', repos=repos, theme=theme)
else:
return "You do not have a valid api token"
@app.route("/")
def serveMain():
form = ContactForm()
return flask.render_template('index.html', form=form)
@app.route("/docs")
def docs():
form = ContactForm()
return flask.render_template('docs.html', username=request.args.get('username'), token=request.args.get("token"), hostname=FLASK_HOST, form=form)
@app.route("/contact", methods=['POST'])
def contact():
form = ContactForm()
if form.validate_on_submit():
flash('Your message was received')
DiscordWebhook(url=discord_url, content=f"Contact @hackathon: name: {form.name.data}, email: {form.email.data}, message: {form.message.data}").execute()
else:
flash('Your message was not transferred correctly.')
return redirect('/')
if __name__ == '__main__':
app.run(debug=True)
# @app.route("/signup_gitlab")
# def signup_gitlab():
# resp = gitlab.get("/user")
# if not gitlab.authorized:
# return redirect(url_for("gitlab.login"))
# print(resp)
# assert resp.ok
# user = User.query.filter_by(username=resp.json()['login']).first()
# username = resp.json()['login']
# gitlab_hash = user.gitlab_hash
# return redirect(f"/docs?username={username}&token={gitlab_hash}")
# def getGitlabRepoLanguage(repo):
# resp = requests.get(f"https://gitlab.com/api/v4/projects/{repo['id']}/languages").json()
# return next(iter(resp))
# def parseGitlabRepos(repos):
# parsedRepos = []
# for repo in repos:
# parsedRepo = {}
# parsedRepo['name'] = repo['name']
# if repo['description'] == None:
# parsedRepo['description'] = "No description provided"
# else:
# parsedRepo['description'] = repo['description']
# try:
# parsedRepo['issues'] = repo['open_issues_count']
# except:
# parsedRepo['issues'] = 0
# parsedRepo['owner'] = repo['namespace']['name']
# parsedRepo['stars'] = repo['star_count']
# parsedRepo['forks'] = repo['forks_count']
# parsedRepo['url'] = repo['web_url']
# try:
# parsedRepo['size'] = repo['statistics']['repository_size'],
# except:
# parsedRepo['size'] = None
# parsedRepo['language'] = getGitlabRepoLanguage(repo)
# parsedRepos.append(parsedRepo)
# return parsedRepos
| 35.908257 | 161 | 0.624681 | [
"MIT"
] | N1ght-Owls/hackathon | app.py | 7,828 | Python |
from pytest_bdd import given, when, then
from model.contact import Contact
import random
@given('a contact list')
def contact_list(db):
return db.get_contact_list()
@given('a contact with <firstname> and <lastname>')
def new_contact(firstname, lastname):
return Contact(firstname=firstname, lastname=lastname)
@when('I add the contact to the list')
def add_new_contact(app, new_contact):
app.contact.create(new_contact)
@then('the new contact list is equal to the old contact list with the added contact')
def verify_contact_added(db, contact_list, new_contact):
old_contacts = contact_list
new_contacts = db.get_contact_list()
old_contacts.append(new_contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
@given('a non-empty contact list')
def non_empty_contact_list(app, db):
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname='any firstname'))
return db.get_contact_list()
@given('a random contact from the list')
def random_contact(non_empty_contact_list):
return random.choice(non_empty_contact_list)
@when('I delete the contact from the list')
def delete_contact(app, random_contact):
app.contact.delete_contact_by_id(random_contact.id)
@then('the new contact list is equal to the old contact list without the deleted contact')
def verify_contact_deleted(db, non_empty_contact_list, random_contact, app, check_ui):
old_contacts = non_empty_contact_list
new_contacts = db.get_contact_list()
    old_contacts.remove(random_contact)
    assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
    if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
@given('a non-empty contact list')
def non_empty_contact_list(app, db):
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname='any firstname'))
return db.get_contact_list()
@when('I modify the contact from the list')
def modify_contact(app, random_contact):
contact = Contact(firstname="New firstname")
app.contact.modify_contact_by_id(random_contact.id, contact)
@then('the new contact list is equal to the old contact list with the modified contact')
def verify_contact_modified(db, non_empty_contact_list, random_contact, app, check_ui):
old_contacts = non_empty_contact_list
index = old_contacts.index(random_contact)
new_contacts = db.get_contact_list()
old_contacts[index] = random_contact
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
| 39.134328 | 123 | 0.764302 | [
"Apache-2.0"
] | kasiazubielik/python_training | bdd/contact_steps.py | 2,622 | Python |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'secure_auth_rest.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.909091 | 80 | 0.687107 | [
"MIT"
] | justindjeumenet/secure_auth_rest | manage.py | 636 | Python |
ownerclass = 'AppDelegate'
ownerimport = 'AppDelegate.h'
# Init
result = Window(330, 110, "Tell me your name!")
nameLabel = Label(result, text="Name:")
nameLabel.width = 45
nameField = TextField(result, text="")
helloLabel = Label(result, text="")
button = Button(result, title="Say Hello", action=Action(owner, 'sayHello'))
button.width = 100
# Owner Assignments
owner.nameField = nameField
owner.helloLabel = helloLabel
# Layout
nameLabel.moveTo(Pack.UpperLeft)
nameField.moveNextTo(nameLabel, Pack.Right, Pack.Middle)
nameField.fill(Pack.Right)
helloLabel.moveNextTo(nameLabel, Pack.Below, Pack.Left)
helloLabel.fill(Pack.Right)
button.moveNextTo(helloLabel, Pack.Below, Pack.Right)
nameField.setAnchor(Pack.UpperLeft, growX=True)
helloLabel.setAnchor(Pack.UpperLeft, growX=True)
button.setAnchor(Pack.UpperRight)
| 30.37037 | 76 | 0.776829 | [
"BSD-3-Clause"
] | hsoft/xibless | demos/localized/MainWindow.py | 820 | Python |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterternary.marker.colorbar"
_path_str = "scatterternary.marker.colorbar.tickfont"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterternary
.marker.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Tickfont
"""
super(Tickfont, self).__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterternary.marker.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.marker.colorbar.Tickfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
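# --- Editor's illustrative sketch (not part of the generated module) ---
if __name__ == "__main__":
    # Build a tick label font for a scatterternary marker colorbar and show
    # the plain-dict form plotly serializes it to.
    font = Tickfont(family="Arial", size=12, color="black")
    print(font.to_plotly_json())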
| 37.469298 | 84 | 0.569004 | [
"MIT"
] | 1abner1/plotly.py | packages/python/plotly/plotly/graph_objs/scatterternary/marker/colorbar/_tickfont.py | 8,543 | Python |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
command = ["cygpath", "-wp", x]
p = sub.Popen(command,stdout=sub.PIPE)
output, errors = p.communicate()
lines = output.split(os.linesep)
return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    # Default to an empty list so a missing path does not leave `ret` unbound.
    ret = []
    if os.path.isdir(path):
        ret = [(os.path.join(path, "*"))]
    elif os.path.exists(path):
        ret = [path]
    return ret
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
try:
out_dict = json.loads(output)
return out_dict
except:
raise RuntimeError("dependency handler returns non-json response: sysout<%s>", output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
Runs the main method of class with the specified arguments but pointing to a local cluster
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
    --java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK.
    For example, --java-debug transport=dt_socket,address=localhost:8000
    will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    And when you want to ship maven artifacts and their transitive dependencies, you can pass them to the --artifacts option as a comma-separated string.
    You can also exclude some dependencies, as you would in a maven pom.
    Please add exclusion artifacts as a '^' separated string after the artifact.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka.
    When you need to pull the artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
Repository format is "<name>^<url>". '^' is taken as separator because URL allows various characters.
For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add JBoss and HDP repositories for dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. It might help when you don't have a '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information to let the dependency resolver use a proxy if needed. There are three parameters for the proxy:
--proxyUrl: URL representation of proxy ('http://host:port')
--proxyUsername: username of proxy if it requires basic auth
--proxyPassword: password of proxy if it requires basic auth
Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process which runs the class and the workers for that topology.
    If for some reason you need to have the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Please be careful because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for the jar command are also applied to the sql command.
    Please refer to "help jar" to see how to use the --jars, --artifacts, --artifactRepositories, --proxyUrl, --proxyUsername, and --proxyPassword options.
    You normally want to pass these options since your SQL typically needs a data source, which in many cases is external storage.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. Storm will then shutdown
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
        or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
        a FILE or STDIN (requires write access).
    delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a
        comma-separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
list PATH - lists heartbeats nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
Set the com.myapp logger's level to WARN for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
        Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
    Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments to the topology,
    and let the scheduler try to find a better scheduling based on the
new situation. The topology will then return to its previous state of activation
(so a deactivated topology will still be deactivated and an activated
topology will go back to being activated).
    Some of what you can change about a topology includes the number of requested workers (-n flag),
    the number of executors for a given component (-e flag), the resources each component is
    requesting as used by the resource aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest error from the running topology. The returned result contains
the key value pairs for component-name and component-error for the components in error.
The result is returned in json format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
on a supervisor node. If the cluster is running in secure mode, then user needs
to have admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives resources into a jar and uploads the jar to Nimbus, then executes the following arguments locally. Useful for non-JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be pairs of function and argument.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
    Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
    "storm.zookeeper.port" as its port. This is only intended for development/testing; the
    Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
Monitor given topology's throughput interactively.
One can specify poll-interval, component-id, stream-id, watch-item[emitted | transferred]
By default,
poll-interval is 4 seconds;
        all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
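# Illustrative sketch (not part of the original script): how parse_local_opts splits the
# arguments of "storm local". The option values below are made-up examples.
#
#   parse_local_opts(("--local-ttl", "60",
#                     "--java-debug", "transport=dt_socket,address=localhost:8000",
#                     "arg1", "arg2"))
#   # -> ("60", "transport=dt_socket,address=localhost:8000", ["arg1", "arg2"])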
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
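# Illustrative sketch (not part of the original script): parse_config_opts pulls the
# -c/--config/--jars/--artifacts/... options out of argv and leaves everything else in
# args_list, preserving order. The values below are made-up examples.
#
#   parse_config_opts(["-c", "nimbus.host=nimbus.mycompany.com",
#                      "--jars", "a.jar,b.jar",
#                      "jar", "topo.jar", "com.example.MyTopology"])
#   # config_list -> ["nimbus.host=nimbus.mycompany.com"]
#   # jars_list   -> ["a.jar", "b.jar"]
#   # args_list   -> ["jar", "topo.jar", "com.example.MyTopology"]
#   # (the remaining returned values keep their defaults: empty lists / None)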
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
| 40.200747 | 523 | 0.68373 | [
"Apache-2.0"
] | JamiesZhang/Storm | bin/storm.py | 43,055 | Python |
"""
The pyinspirehep is A python wrapper for Inspirehep API.
"""
from pyinspirehep.client import Client | 20.8 | 56 | 0.778846 | [
"MIT"
] | javadebadi/pyinspirehep | pyinspirehep/__init__.py | 104 | Python |
from envparse import env
env.read_envfile(".env")
BOT_TOKEN = env.str("BOT_TOKEN") | 14.166667 | 32 | 0.741176 | [
"MIT"
] | exthrempty/vkbottle-bot-template | src/config.py | 85 | Python |
# twitter_app/iris_classifier.py
import os
import pickle
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
MODEL_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "models", "latest_model.pkl")
def train_and_save_model():
print("TRAINING THE MODEL...")
X, y = load_iris(return_X_y=True)
#print(type(X), X.shape) #> <class 'numpy.ndarray'> (150, 4)
#print(type(y), y.shape) #> <class 'numpy.ndarray'> (150,)
classifier = LogisticRegression() # for example
classifier.fit(X, y)
print("SAVING THE MODEL...")
with open(MODEL_FILEPATH, "wb") as model_file:
pickle.dump(classifier, model_file)
return classifier
def load_model():
print("LOADING THE MODEL...")
with open(MODEL_FILEPATH, "rb") as model_file:
saved_model = pickle.load(model_file)
return saved_model
if __name__ == "__main__":
#train_and_save_model()
clf = load_model()
print("CLASSIFIER:", clf)
X, y = load_iris(return_X_y=True) # just to have some data to use when predicting
inputs = X[:2, :]
print(type(inputs), inputs)
result = clf.predict(inputs)
print("RESULT:", result) | 27.697674 | 92 | 0.677582 | [
"MIT"
] | Struth-Rourke/twitter_flask_app | twitter_app/iris_classifier.py | 1,191 | Python |
"""BOM data 'collector' that downloads the observation data."""
import asyncio
import datetime
import aiohttp
import logging
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(minutes=10)
BASE_URL = "https://api.weather.bom.gov.au"
DAILY_FORECASTS_URL = "/v1/locations/{}/forecasts/daily"
LOCATIONS_URL = "/v1/locations/{}"
MDI_ICON_MAP = {
"clear": "mdi:weather-night",
"cloudy": "mdi:weather-cloudy",
"cyclone": "mdi:weather-hurricane",
"dust": "mdi:weather-hazy",
"dusty": "mdi:weather-hazy",
"fog": "mdi:weather-fog",
"frost": "mdi:snowflake-melt",
"haze": "mdi:weather-hazy",
"hazy": "mdi:weather-hazy",
"heavy_shower": "mdi:weather-pouring",
"heavy_showers": "mdi:weather-pouring",
"light_rain": "mdi:weather-partly-rainy",
"light_shower": "mdi:weather-light-showers",
"light_showers": "mdi:weather-light-showers",
"mostly_sunny": "mdi:weather-sunny",
"partly_cloudy": "mdi:weather-partly-cloudy",
"rain": "mdi:weather-pouring",
"shower": "mdi:weather-rainy",
"showers": "mdi:weather-rainy",
"snow": "mdi:weather-snowy",
"storm": "mdi:weather-lightning-rainy",
"storms": "mdi:weather-lightning-rainy",
"sunny": "mdi:weather-sunny",
"tropical_cyclone": "mdi:weather-hurricane",
"wind": "mdi:weather-windy",
"windy": "mdi:weather-windy",
None: None,
}
OBSERVATIONS_URL = "https://api.weather.bom.gov.au/v1/locations/{}/observations"
UV_MAP = {
"extreme": "Extreme",
"veryhigh": "Very High",
"high": "High",
"moderate": "Moderate",
"low": "Low",
None: None,
}
class Collector:
"""Data collector for BOM integration."""
def __init__(self, latitude, longitude):
"""Init BOM data collector."""
self.observations_data = None
self.daily_forecasts_data = None
self.geohash = self.geohash_encode(latitude, longitude)
_LOGGER.debug(f"geohash: {self.geohash}")
async def get_location_name(self):
"""Get JSON location name from BOM API endpoint."""
url = BASE_URL + LOCATIONS_URL.format(self.geohash)
async with aiohttp.ClientSession() as session:
response = await session.get(url)
if response is not None and response.status == 200:
locations_data = await response.json()
self.location_name = locations_data["data"]["name"]
return True
async def get_observations_data(self):
"""Get JSON observations data from BOM API endpoint."""
url = OBSERVATIONS_URL.format(self.geohash)
async with aiohttp.ClientSession() as session:
response = await session.get(url)
if response is not None and response.status == 200:
self.observations_data = await response.json()
await self.format_observations_data()
async def format_observations_data(self):
"""Flatten out wind and gust data."""
flattened = {}
wind = self.observations_data["data"]["wind"]
flattened["wind_speed_kilometre"] = wind["speed_kilometre"]
flattened["wind_speed_knot"] = wind["speed_knot"]
flattened["wind_direction"] = wind["direction"]
if self.observations_data["data"]["gust"] is not None:
gust = self.observations_data["data"]["gust"]
flattened["gust_speed_kilometre"] = gust["speed_kilometre"]
flattened["gust_speed_knot"] = gust["speed_knot"]
else:
flattened["gust_speed_kilometre"] = None
flattened["gust_speed_knot"] = None
self.observations_data["data"].update(flattened)
async def get_daily_forecasts_data(self):
"""Get JSON daily forecasts data from BOM API endpoint."""
url = BASE_URL + DAILY_FORECASTS_URL.format(self.geohash)
async with aiohttp.ClientSession() as session:
response = await session.get(url)
if response is not None and response.status == 200:
self.daily_forecasts_data = await response.json()
await self.format_forecast_data()
async def format_forecast_data(self):
"""Flatten out forecast data."""
flattened = {}
days = len(self.daily_forecasts_data["data"])
for day in range(0, days):
icon = self.daily_forecasts_data["data"][day]["icon_descriptor"]
flattened["mdi_icon"] = MDI_ICON_MAP[icon]
uv = self.daily_forecasts_data["data"][day]["uv"]
flattened["uv_category"] = UV_MAP[uv["category"]]
flattened["uv_max_index"] = uv["max_index"]
flattened["uv_start_time"] = uv["start_time"]
flattened["uv_end_time"] = uv["end_time"]
rain = self.daily_forecasts_data["data"][day]["rain"]
flattened["rain_chance"] = rain["chance"]
flattened["rain_amount_min"] = rain["amount"]["min"]
# When rain amount max is None, set as rain amount min
if rain["amount"]["max"] is None:
flattened["rain_amount_max"] = flattened["rain_amount_min"]
flattened["rain_amount_range"] = rain["amount"]["min"]
else:
flattened["rain_amount_max"] = rain["amount"]["max"]
flattened["rain_amount_range"] = "{} to {}".format(
rain["amount"]["min"],
rain["amount"]["max"],
)
self.daily_forecasts_data["data"][day].update(flattened)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Refresh the data on the collector object."""
await self.get_observations_data()
await self.get_daily_forecasts_data()
def geohash_encode(self, latitude, longitude, precision=6):
base32 = '0123456789bcdefghjkmnpqrstuvwxyz'
lat_interval = (-90.0, 90.0)
lon_interval = (-180.0, 180.0)
geohash = []
bits = [16, 8, 4, 2, 1]
bit = 0
ch = 0
even = True
while len(geohash) < precision:
if even:
mid = (lon_interval[0] + lon_interval[1]) / 2
if longitude > mid:
ch |= bits[bit]
lon_interval = (mid, lon_interval[1])
else:
lon_interval = (lon_interval[0], mid)
else:
mid = (lat_interval[0] + lat_interval[1]) / 2
if latitude > mid:
ch |= bits[bit]
lat_interval = (mid, lat_interval[1])
else:
lat_interval = (lat_interval[0], mid)
even = not even
if bit < 4:
bit += 1
else:
geohash += base32[ch]
bit = 0
ch = 0
return ''.join(geohash)
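# Illustrative usage sketch (the coordinates are made-up examples, not from this module):
# the collector only needs a latitude/longitude pair; the 6-character geohash it derives
# is what keys the BOM API endpoints.
#
#   collector = Collector(-37.81, 144.96)
#   collector.geohash                 # 6-character base32 string, used as
#                                     # https://api.weather.bom.gov.au/v1/locations/<geohash>/observations
#   # await collector.async_update()  # inside an event loop: fills observations_data
#   #                                 # and daily_forecasts_data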
| 36.648936 | 80 | 0.594194 | [
"Unlicense"
] | QziP22/HomeAssistantConfig | custom_components/bureau_of_meteorology/PyBoM/collector.py | 6,890 | Python |
def autonomous_setup():
pass
def autonomous_main():
pass
def teleop_setup():
pass
def teleop_main():
pass
| 10.416667 | 23 | 0.656 | [
"Apache-2.0"
] | DanielMolina24/website | assets/student-resources/blank_template.py | 125 | Python |
from unittest import TestCase
from webtest import TestApp
from ironic_inventory.tests import FunctionalTest
class TestRootController(FunctionalTest):
def test_get(self):
response = self.app.get('/')
assert response.status_int == 200
def test_search(self):
response = self.app.post('/', params={'q': 'RestController'})
assert response.status_int == 302
assert response.headers['Location'] == (
'http://pecan.readthedocs.org/en/latest/search.html'
'?q=RestController'
)
def test_get_not_found(self):
response = self.app.get('/a/bogus/url', expect_errors=True)
assert response.status_int == 404
| 30.304348 | 69 | 0.657102 | [
"Apache-2.0"
] | softlayer/ironic-inventory-integrator | ironic_inventory/tests/test_functional.py | 697 | Python |
import string
from typing import List


class Solution:
def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
if endWord not in wordList:
return 0
result = 0
l = len(beginWord)
beginSet = {beginWord}
endSet = {endWord}
wordList = set(wordList)
while beginSet or endSet:
result += 1
if len(beginSet) < len(endSet):
beginSet, endSet = endSet, beginSet
newSet = set()
for word in beginSet:
for index in range(l):
for c in string.ascii_lowercase:
newWord = word[:index] + c + word[index + 1:]
if newWord in endSet:
return result + 1
if newWord not in wordList:
continue
wordList.remove(newWord)
newSet.add(newWord)
beginSet = newSet
return 0
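# Illustrative usage sketch (the classic LeetCode example, not part of the original file):
#
#   Solution().ladderLength("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"])
#   # -> 5  (hit -> hot -> dot -> dog -> cog)
#   Solution().ladderLength("hit", "cog", ["hot", "dot", "dog", "lot", "log"])
#   # -> 0  (endWord not in wordList)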
| 32.027027 | 85 | 0.389873 | [
"MIT"
] | udcymen/leetcode | Python/questions/WordLadder/word-ladder.py | 1,185 | Python |
# SPDX-License-Identifier: MIT
# Copyright (c) 2018-2022 Amano Team
import os
import shutil
import tempfile
from PIL import Image
from pyrogram import Client, filters
from pyrogram.enums import MessageEntityType
from pyrogram.errors import PeerIdInvalid, StickersetInvalid
from pyrogram.raw.functions.messages import GetStickerSet, SendMedia
from pyrogram.raw.functions.stickers import AddStickerToSet, CreateStickerSet
from pyrogram.raw.types import (
DocumentAttributeFilename,
InputDocument,
InputMediaUploadedDocument,
InputStickerSetItem,
InputStickerSetShortName,
)
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, Message
from eduu.config import LOG_CHAT, PREFIXES
from eduu.utils import EMOJI_PATTERN, http
from eduu.utils.localization import use_chat_lang
@Client.on_message(filters.command(["kang", "kibe", "steal"], PREFIXES))
@use_chat_lang()
async def kang_sticker(c: Client, m: Message, strings):
prog_msg = await m.reply_text(strings("kanging_sticker_msg"))
bot_username = c.me.username
sticker_emoji = "🤔"
packnum = 0
packname_found = False
resize = False
animated = False
reply = m.reply_to_message
user = await c.resolve_peer(m.from_user.username or m.from_user.id)
if reply and reply.media:
if reply.photo:
resize = True
elif reply.document:
if "image" in reply.document.mime_type:
# mime_type: image/webp
resize = True
elif "tgsticker" in reply.document.mime_type:
# mime_type: application/x-tgsticker
animated = True
elif reply.sticker:
if not reply.sticker.file_name:
return await prog_msg.edit_text(strings("err_sticker_no_file_name"))
if reply.sticker.emoji:
sticker_emoji = reply.sticker.emoji
animated = reply.sticker.is_animated
if not reply.sticker.file_name.endswith(".tgs"):
resize = True
else:
return await prog_msg.edit_text(strings("invalid_media_string"))
pack_prefix = "anim" if animated else "a"
packname = f"{pack_prefix}_{m.from_user.id}_by_{bot_username}"
if len(m.command) > 1:
if m.command[1].isdigit() and int(m.command[1]) > 0:
# provide pack number to kang in desired pack
packnum = m.command.pop(1)
packname = f"{pack_prefix}{packnum}_{m.from_user.id}_by_{bot_username}"
if len(m.command) > 1:
# matches all valid emojis in input
sticker_emoji = (
"".join(set(EMOJI_PATTERN.findall("".join(m.command[1:]))))
or sticker_emoji
)
filename = await c.download_media(m.reply_to_message)
if not filename:
# Failed to download
await prog_msg.delete()
return
elif m.entities and len(m.entities) > 1:
packname = f"a_{m.from_user.id}_by_{bot_username}"
pack_prefix = "a"
# searching if image_url is given
img_url = None
filename = "sticker.png"
for y in m.entities:
if y.type == MessageEntityType.URL:
img_url = m.text[y.offset : (y.offset + y.length)]
break
if not img_url:
await prog_msg.delete()
return
try:
r = await http.get(img_url)
if r.status_code == 200:
with open(filename, mode="wb") as f:
f.write(r.read())
except Exception as r_e:
return await prog_msg.edit_text(f"{r_e.__class__.__name__} : {r_e}")
if len(m.command) > 2:
# m.command[1] is image_url
if m.command[2].isdigit() and int(m.command[2]) > 0:
packnum = m.command.pop(2)
packname = f"a{packnum}_{m.from_user.id}_by_{bot_username}"
if len(m.command) > 2:
sticker_emoji = (
"".join(set(EMOJI_PATTERN.findall("".join(m.command[2:]))))
or sticker_emoji
)
resize = True
else:
return await prog_msg.delete()
try:
if resize:
filename = resize_image(filename)
max_stickers = 50 if animated else 120
while not packname_found:
try:
stickerset = await c.invoke(
GetStickerSet(
stickerset=InputStickerSetShortName(short_name=packname),
hash=0,
)
)
if stickerset.set.count >= max_stickers:
packnum += 1
packname = (
f"{pack_prefix}_{packnum}_{m.from_user.id}_by_{bot_username}"
)
else:
packname_found = True
except StickersetInvalid:
break
file = await c.save_file(filename)
media = await c.invoke(
SendMedia(
peer=(await c.resolve_peer(LOG_CHAT)),
media=InputMediaUploadedDocument(
file=file,
mime_type=c.guess_mime_type(filename),
attributes=[DocumentAttributeFilename(file_name=filename)],
),
message=f"#Sticker kang by UserID -> {m.from_user.id}",
random_id=c.rnd_id(),
)
)
stkr_file = media.updates[-1].message.media.document
if packname_found:
await prog_msg.edit_text(strings("use_existing_pack"))
await c.invoke(
AddStickerToSet(
stickerset=InputStickerSetShortName(short_name=packname),
sticker=InputStickerSetItem(
document=InputDocument(
id=stkr_file.id,
access_hash=stkr_file.access_hash,
file_reference=stkr_file.file_reference,
),
emoji=sticker_emoji,
),
)
)
else:
await prog_msg.edit_text(strings("create_new_pack_string"))
u_name = m.from_user.username
if u_name:
u_name = f"@{u_name}"
else:
u_name = str(m.from_user.id)
stkr_title = f"{u_name}'s "
if animated:
stkr_title += "Anim. "
stkr_title += "EduuPack"
if packnum != 0:
stkr_title += f" v{packnum}"
try:
await c.invoke(
CreateStickerSet(
user_id=user,
title=stkr_title,
short_name=packname,
stickers=[
InputStickerSetItem(
document=InputDocument(
id=stkr_file.id,
access_hash=stkr_file.access_hash,
file_reference=stkr_file.file_reference,
),
emoji=sticker_emoji,
)
],
animated=animated,
)
)
except PeerIdInvalid:
return await prog_msg.edit_text(
strings("cant_create_sticker_pack_string"),
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
"/start", url=f"https://t.me/{bot_username}?start"
)
]
]
),
)
except Exception as all_e:
await prog_msg.edit_text(f"{all_e.__class__.__name__} : {all_e}")
else:
markup = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
strings("view_sticker_pack_btn"),
url=f"t.me/addstickers/{packname}",
)
]
]
)
kanged_success_msg = strings("sticker_kanged_string")
await prog_msg.edit_text(
kanged_success_msg.format(sticker_emoji=sticker_emoji), reply_markup=markup
)
# Cleanup
try:
os.remove(filename)
except OSError:
pass
def resize_image(filename: str) -> str:
im = Image.open(filename)
maxsize = 512
scale = maxsize / max(im.width, im.height)
sizenew = (int(im.width * scale), int(im.height * scale))
im = im.resize(sizenew, Image.NEAREST)
downpath, f_name = os.path.split(filename)
# not hardcoding png_image as "sticker.png"
png_image = os.path.join(downpath, f"{f_name.split('.', 1)[0]}.png")
im.save(png_image, "PNG")
if png_image != filename:
os.remove(filename)
return png_image
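# Illustrative sketch (not part of the original plugin): resize_image rescales the image
# so its longest side is 512 px, keeping the aspect ratio, and always re-saves as PNG.
#
#   resize_image("photo_1024x768.jpg")   # -> "photo_1024x768.png", now 512x384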
@Client.on_message(filters.command("stickerid", PREFIXES) & filters.reply)
@use_chat_lang()
async def getstickerid(c: Client, m: Message, strings):
if m.reply_to_message.sticker:
await m.reply_text(
strings("get_sticker_id_string").format(
stickerid=m.reply_to_message.sticker.file_id
)
)
@Client.on_message(filters.command("getsticker", PREFIXES) & filters.reply)
@use_chat_lang()
async def getstickeraspng(c: Client, m: Message, strings):
sticker = m.reply_to_message.sticker
if sticker:
if sticker.is_animated:
await m.reply_text(strings("animated_not_supported"))
elif not sticker.is_animated:
with tempfile.TemporaryDirectory() as tempdir:
path = os.path.join(tempdir, "getsticker")
sticker_file = await c.download_media(
message=m.reply_to_message,
file_name=f"{path}/{sticker.set_name}.png",
)
await m.reply_to_message.reply_document(
document=sticker_file,
caption=strings("sticker_info").format(
emoji=sticker.emoji, id=sticker.file_id
),
)
shutil.rmtree(tempdir, ignore_errors=True)
else:
await m.reply_text(strings("not_sticker"))
| 37.821429 | 87 | 0.533428 | [
"MIT"
] | MikeOwino/EduuRobot | eduu/plugins/stickers.py | 10,593 | Python |
# coding: utf-8
import arrow
from flask import current_app, request, g
from itsdangerous import TimedJSONWebSignatureSerializer as JWT
from actor_libs.errors import AuthFailed
from app.models import Application, User
__all__ = ['basic_auth', 'token_auth']
def basic_auth(username, password) -> bool:
""" HTTP basic authorization """
query_result = Application.query \
.join(User, User.id == Application.userIntID) \
.with_entities(Application, User) \
.filter(Application.appStatus == 1, User.enable == 1,
Application.appID == username).first()
if not query_result:
raise AuthFailed(field='appID')
application, user = query_result
# Verify that app is available
date_now = arrow.now().naive
if application.expiredAt and date_now > application.expiredAt:
raise AuthFailed(field='expiredAt')
if application.appToken != password:
raise AuthFailed(field='appToken')
g.user_id: int = user.id
g.tenant_uid: str = user.tenantID
g.role_id: int = application.roleIntID
g.app_uid: str = application.appID
user.lastRequestTime = date_now # Update user active time
user.update()
return True
def token_auth(token) -> bool:
""" HTTP bearer token authorization """
jwt = JWT(current_app.config['SECRET_KEY'])
try:
data = jwt.loads(token)
except Exception:
raise AuthFailed(field='token')
if data.get('consumer_id'):
# todo consumer user auth ?
...
else:
# Normal user
        if 'user_id' not in data or 'role_id' not in data:
raise AuthFailed(field='token')
if data['role_id'] != 1 and not data.get('tenant_uid'):
raise AuthFailed(field='token')
user = User.query \
.filter(User.roleIntID == data['role_id'], User.id == data['user_id'],
User.tenantID == data['tenant_uid']).first()
if not user:
raise AuthFailed(field='token')
g.user_id: int = user.id
g.tenant_uid: str = user.tenantID
g.role_id: int = user.roleIntID
g.app_uid: str = None
g.user_auth_type: int = user.userAuthType
user.lastRequestTime = arrow.now().naive
user.update()
return True
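# Illustrative sketch (an assumption, not from this module): a token accepted by
# token_auth() can be produced with the same itsdangerous serializer and SECRET_KEY,
# e.g. wherever the project issues tokens:
#
#   jwt = JWT(current_app.config['SECRET_KEY'], expires_in=3600)
#   token = jwt.dumps({'user_id': user.id,
#                      'role_id': user.roleIntID,
#                      'tenant_uid': user.tenantID}).decode()
#   # then sent by the client as:  Authorization: Bearer <token>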
| 32.070423 | 82 | 0.635485 | [
"Apache-2.0"
] | Mateus-dang/ActorCloud | server/actor_libs/auth/base.py | 2,277 | Python |
# *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
import testUtils
class TestRegistryStatus(testUtils.AbstractTest):
# =========================================================================
# Service Status
# =========================================================================
def testStatus(self):
status = self.appClient.status.serviceStatus()
assert status.region == "us"
assert status.dashboard in ["green", "orange", "red"]
assert status.messaging in ["green", "orange", "red"]
assert status.thirdParty in ["green", "orange", "red"]
| 41.56 | 79 | 0.480269 | [
"EPL-1.0"
] | cesariojr/iot-python | test/test_api_status.py | 1,039 | Python |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../useis'))
# -- Project information -----------------------------------------------------
project = 'useis'
copyright = '2021, Jean-Philippe Mercier'
author = 'Jean-Philippe Mercier'
# The full version, including alpha/beta/rc tags
release = '"0.5.0"'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'rinoh.frontend.sphinx',
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.coverage'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] | 33.983607 | 79 | 0.662808 | [
"MIT"
] | jeanphilippemercier/uquake-useis | docs/source/conf.py | 2,073 | Python |
"""Amazon Neptune Module."""
import logging
import re
from typing import Any
import pandas as pd
from gremlin_python.process.graph_traversal import GraphTraversalSource, __
from gremlin_python.process.translator import Translator
from gremlin_python.process.traversal import Cardinality, T
from gremlin_python.structure.graph import Graph
from awswrangler import exceptions
from awswrangler.neptune.client import NeptuneClient
_logger: logging.Logger = logging.getLogger(__name__)
def execute_gremlin(client: NeptuneClient, query: str) -> pd.DataFrame:
"""Return results of a Gremlin traversal as pandas dataframe.
Parameters
----------
client : neptune.Client
instance of the neptune client to use
traversal : str
The gremlin traversal to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run a Gremlin Query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> df = wr.neptune.execute_gremlin(client, "g.V().limit(1)")
"""
results = client.read_gremlin(query)
df = pd.DataFrame.from_records(results)
return df
def execute_opencypher(client: NeptuneClient, query: str) -> pd.DataFrame:
"""Return results of a openCypher traversal as pandas dataframe.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
query : str
The openCypher query to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run an openCypher query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> resp = wr.neptune.execute_opencypher(client, "MATCH (n) RETURN n LIMIT 1")
"""
resp = client.read_opencypher(query)
df = pd.DataFrame.from_dict(resp)
return df
def execute_sparql(client: NeptuneClient, query: str) -> pd.DataFrame:
"""Return results of a SPARQL query as pandas dataframe.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
query : str
The SPARQL traversal to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run a SPARQL query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
    >>> df = wr.neptune.execute_sparql(client, "PREFIX foaf: <http://xmlns.com/foaf/0.1/>
            SELECT ?name
            WHERE {
                    ?person foaf:name ?name .
            }")
    """
data = client.read_sparql(query)
df = None
if "results" in data and "bindings" in data["results"]:
df = pd.DataFrame(data["results"]["bindings"])
        # applymap returns a new frame, so keep the result to actually extract the values
        df = df.applymap(lambda x: x["value"])
else:
df = pd.DataFrame(data)
return df
def to_property_graph(
client: NeptuneClient, df: pd.DataFrame, batch_size: int = 50, use_header_cardinality: bool = True
) -> bool:
"""Write records stored in a DataFrame into Amazon Neptune.
If writing to a property graph then DataFrames for vertices and edges must be written separately.
DataFrames for vertices must have a ~label column with the label and a ~id column for the vertex id.
    If the ~id column does not exist, the specified id does not exist, or the id is empty, then a new vertex will be added.
    If no ~label column exists an exception will be thrown.
    DataFrames for edges must have a ~id, ~label, ~to, and ~from column. If the ~id column does not exist,
    the specified id does not exist, or the id is empty, then a new edge will be added. If no ~label, ~to, or ~from column
    exists an exception will be thrown.
    If you would like to save data using `single` cardinality then you can postfix (single) to the column header and
    set use_header_cardinality=True (default). e.g. A column named `name(single)` will save the `name` property as
    single cardinality. You can disable this by setting `use_header_cardinality=False`.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
df : pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
batch_size: int
The number of rows to save at a time. Default 50
use_header_cardinality: bool
If True, then the header cardinality will be used to save the data. Default True
Returns
-------
bool
True if records were written
Examples
--------
Writing to Amazon Neptune
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> wr.neptune.gremlin.to_property_graph(
... df=df
... )
"""
# check if ~id and ~label column exist and if not throw error
g = Graph().traversal()
is_edge_df = False
is_update_df = True
if "~id" in df.columns:
if "~label" in df.columns:
is_update_df = False
if "~to" in df.columns and "~from" in df.columns:
is_edge_df = True
else:
raise exceptions.InvalidArgumentValue(
"Dataframe must contain at least a ~id and a ~label column to be saved to Amazon Neptune"
)
# Loop through items in the DF
for (index, row) in df.iterrows():
# build up a query
if is_update_df:
g = _build_gremlin_update(g, row, use_header_cardinality)
elif is_edge_df:
g = _build_gremlin_insert_edges(g, row.to_dict(), use_header_cardinality)
else:
g = _build_gremlin_insert_vertices(g, row.to_dict(), use_header_cardinality)
# run the query
if index > 0 and index % batch_size == 0:
res = _run_gremlin_insert(client, g)
if res:
g = Graph().traversal()
return _run_gremlin_insert(client, g)
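# Illustrative sketch (made-up data, not from the module): the vertex and edge frames
# expected by to_property_graph. The "(single)" suffix is only honoured when
# use_header_cardinality=True (the default).
#
#   vertices = pd.DataFrame([
#       {"~id": "p1", "~label": "person", "name(single)": "alice", "age": 30},
#       {"~id": "p2", "~label": "person", "name(single)": "bob", "age": 31},
#   ])
#   edges = pd.DataFrame([
#       {"~id": "e1", "~label": "knows", "~from": "p1", "~to": "p2"},
#   ])
#   to_property_graph(client, vertices)
#   to_property_graph(client, edges)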
def to_rdf_graph(
client: NeptuneClient,
df: pd.DataFrame,
batch_size: int = 50,
subject_column: str = "s",
predicate_column: str = "p",
object_column: str = "o",
graph_column: str = "g",
) -> bool:
"""Write records stored in a DataFrame into Amazon Neptune.
The DataFrame must consist of triples with column names for the subject, predicate, and object specified.
If you want to add data into a named graph then you will also need the graph column.
Parameters
----------
client (NeptuneClient) :
instance of the neptune client to use
df (pandas.DataFrame) :
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
subject_column (str, optional) :
The column name in the dataframe for the subject. Defaults to 's'
predicate_column (str, optional) :
The column name in the dataframe for the predicate. Defaults to 'p'
object_column (str, optional) :
The column name in the dataframe for the object. Defaults to 'o'
graph_column (str, optional) :
The column name in the dataframe for the graph if sending across quads. Defaults to 'g'
Returns
-------
bool
True if records were written
Examples
--------
Writing to Amazon Neptune
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> wr.neptune.gremlin.to_rdf_graph(
... df=df
... )
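    A minimal sketch (the IRI values are hypothetical) of a triples DataFrame
    using the default column names:

    >>> import pandas as pd
    >>> df = pd.DataFrame({
    ...     "s": ["http://example.com/resource/alice"],
    ...     "p": ["http://example.com/ontology/knows"],
    ...     "o": ["http://example.com/resource/bob"],
    ... })
    >>> wr.neptune.gremlin.to_rdf_graph(df=df)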
"""
is_quads = False
if pd.Series([subject_column, object_column, predicate_column]).isin(df.columns).all():
if graph_column in df.columns:
is_quads = True
else:
raise exceptions.InvalidArgumentValue(
"""Dataframe must contain at least the subject, predicate, and object columns defined or the defaults
(s, p, o) to be saved to Amazon Neptune"""
)
query = ""
# Loop through items in the DF
for (index, row) in df.iterrows():
# build up a query
if is_quads:
insert = f"""INSERT DATA {{ GRAPH <{row[graph_column]}> {{<{row[subject_column]}>
<{str(row[predicate_column])}> <{row[object_column]}> . }} }}; """
query = query + insert
else:
insert = f"""INSERT DATA {{ <{row[subject_column]}> <{str(row[predicate_column])}>
<{row[object_column]}> . }}; """
query = query + insert
# run the query
if index > 0 and index % batch_size == 0:
res = client.write_sparql(query)
if res:
query = ""
return client.write_sparql(query)
def connect(host: str, port: int, iam_enabled: bool = False, **kwargs: Any) -> NeptuneClient:
"""Create a connection to a Neptune cluster.
Parameters
----------
host : str
The host endpoint to connect to
port : int
The port endpoint to connect to
iam_enabled : bool, optional
True if IAM is enabled on the cluster. Defaults to False.
Returns
-------
NeptuneClient
        The client connection to the Neptune cluster
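    Examples
    --------
    A minimal sketch, assuming a hypothetical cluster endpoint and the default
    Neptune port:

    >>> import awswrangler as wr
    >>> client = wr.neptune.connect("my-cluster.cluster-xyz.us-east-1.neptune.amazonaws.com", 8182, iam_enabled=False)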
"""
return NeptuneClient(host, port, iam_enabled, **kwargs)
def _get_column_name(column: str) -> str:
if "(single)" in column.lower():
return re.compile(r"\(single\)", re.IGNORECASE).sub("", column)
return column
def _set_properties(g: GraphTraversalSource, use_header_cardinality: bool, row: Any) -> GraphTraversalSource:
for (column, value) in row.items():
if column not in ["~id", "~label", "~to", "~from"]:
# If the column header is specifying the cardinality then use it
if use_header_cardinality:
if column.lower().find("(single)") > 0 and pd.notna(value):
g = g.property(Cardinality.single, _get_column_name(column), value)
else:
g = _expand_properties(g, _get_column_name(column), value)
else:
# If not using header cardinality then use the default of set
g = _expand_properties(g, column, value)
return g
def _expand_properties(g: GraphTraversalSource, column: str, value: Any) -> GraphTraversalSource:
# If this is a list then expand it out into multiple property calls
if isinstance(value, list) and len(value) > 0:
for item in value:
g = g.property(Cardinality.set_, column, item)
elif pd.notna(value):
g = g.property(Cardinality.set_, column, value)
return g
def _build_gremlin_update(g: GraphTraversalSource, row: Any, use_header_cardinality: bool) -> GraphTraversalSource:
g = g.V(str(row["~id"]))
g = _set_properties(g, use_header_cardinality, row)
return g
def _build_gremlin_insert_vertices(
g: GraphTraversalSource, row: Any, use_header_cardinality: bool = False
) -> GraphTraversalSource:
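    # The fold()/coalesce(unfold(), addV(...)) chain below is the usual Gremlin
    # "get or create" upsert: reuse the vertex with this ~id if it already exists,
    # otherwise add a new one with the given ~label. For a hypothetical row
    # {"~id": "v-1", "~label": "person"} it roughly translates to:
    #   g.V('v-1').fold().coalesce(unfold(), addV('person').property(T.id, 'v-1'))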
g = g.V(str(row["~id"])).fold().coalesce(__.unfold(), __.addV(row["~label"]).property(T.id, str(row["~id"])))
g = _set_properties(g, use_header_cardinality, row)
return g
def _build_gremlin_insert_edges(
g: GraphTraversalSource, row: pd.Series, use_header_cardinality: bool
) -> GraphTraversalSource:
g = (
g.V(str(row["~from"]))
.fold()
.coalesce(__.unfold(), _build_gremlin_insert_vertices(__, {"~id": row["~from"], "~label": "Vertex"}))
.addE(row["~label"])
.property(T.id, str(row["~id"]))
.to(
__.V(str(row["~to"]))
.fold()
.coalesce(__.unfold(), _build_gremlin_insert_vertices(__, {"~id": row["~to"], "~label": "Vertex"}))
)
)
g = _set_properties(g, use_header_cardinality, row)
return g
def _run_gremlin_insert(client: NeptuneClient, g: GraphTraversalSource) -> bool:
translator = Translator("g")
s = translator.translate(g.bytecode)
s = s.replace("Cardinality.", "") # hack to fix parser error for set cardinality
_logger.debug(s)
res = client.write_gremlin(s)
return res
def flatten_nested_df(
df: pd.DataFrame, include_prefix: bool = True, seperator: str = "_", recursive: bool = True
) -> pd.DataFrame:
"""Flatten the lists and dictionaries of the input data frame.
Parameters
----------
df : pd.DataFrame
The input data frame
include_prefix : bool, optional
If True, then it will prefix the new column name with the original column name.
Defaults to True.
seperator : str, optional
        The separator to use between field names when a dictionary is exploded.
Defaults to "_".
recursive : bool, optional
If True, then this will recurse the fields in the data frame. Defaults to True.
Returns
-------
pd.DataFrame: The flattened data frame
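    Examples
    --------
    A minimal sketch (hypothetical data) showing how a dictionary column is
    exploded into prefixed columns:

    >>> df = pd.DataFrame({"id": [1], "meta": [{"a": 1, "b": 2}]})
    >>> flatten_nested_df(df).columns.tolist()
    ['index', 'id', 'meta_a', 'meta_b']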
"""
if seperator is None:
seperator = "_"
df = df.reset_index()
# search for list and map
s = (df.applymap(type) == list).all()
list_columns = s[s].index.tolist()
s = (df.applymap(type) == dict).all()
dict_columns = s[s].index.tolist()
if len(list_columns) > 0 or len(dict_columns) > 0:
new_columns = []
for col in dict_columns:
# expand dictionaries horizontally
expanded = None
if include_prefix:
expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f"{col}{seperator}")
else:
expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f"{seperator}")
expanded.index = df.index
df = pd.concat([df, expanded], axis=1).drop(columns=[col])
new_columns.extend(expanded.columns)
for col in list_columns:
df = df.drop(columns=[col]).join(df[col].explode().to_frame())
new_columns.append(col)
        # check if there are still dict or list fields to flatten
s = (df[new_columns].applymap(type) == list).all()
list_columns = s[s].index.tolist()
s = (df[new_columns].applymap(type) == dict).all()
dict_columns = s[s].index.tolist()
if recursive and (len(list_columns) > 0 or len(dict_columns) > 0):
df = flatten_nested_df(df, include_prefix=include_prefix, seperator=seperator, recursive=recursive)
return df
| 34.640288 | 116 | 0.638975 | [
"Apache-2.0"
] | minwook-shin/aws-data-wrangler | awswrangler/neptune/neptune.py | 14,445 | Python |
# The MIT License (MIT)
#
# Copyright (c) 2019 Paul Sajna for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_imageload.gif`
====================================================
Load pixel values (indices or colors) into one or more bitmaps and colors into a palette from a GIF file.
* Author(s): Paul Sajna
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_ImageLoad.git"
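# A minimal usage sketch (the file name is hypothetical). `load` walks the GIF's
# blocks, printing extension info and decoding each image frame's LZW data:
#
#     with open("sample.gif", "rb") as f:
#         load(f)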
bitmaps = []
def load(f):
bitmaps = []
palette = []
table = []
f.seek(3)
version = f.read(3)
if (version != b'89a') and (version != b'87a'):
raise RuntimeError("Invalid GIF version")
width = int.from_bytes(f.read(2), 'little')
height = int.from_bytes(f.read(2), 'little')
gct_header = int.from_bytes(f.read(1), 'little')
if (gct_header & 0b10000000) != 0b10000000:
raise NotImplementedError("Only gifs with a global color table are supported")
#if (gct_header & 0b0111000 >> 3) + 1 != 8:
#raise NotImplementedError("Only 8-bit color is supported")
gct_size = 2 ** ((gct_header & 0b00000111) + 1)
bg_color_index = int.from_bytes(f.read(1), 'little')
f.seek(1, 1) # seek one byte relative to the current position (skip a byte)
for i in range(gct_size):
color = f.read(3)
palette.append(color)
while True:
separator = f.read(1)
if separator:
separator = int.from_bytes(separator, 'little')
if separator == 0x21:
# Extension
label = int.from_bytes(f.read(1), 'little')
if label == 0xf9:
# Graphic Control Extension
print("Graphic Control Extension")
f.seek(1,1)
packed = int.from_bytes(f.read(1), 'little')
# delay in seconds between frames
delay = int.from_bytes(f.read(2), 'little') / 100
# We only care about the transparency flag for now
if packed & 1 == 1:
transparency_index = int.from_bytes(f.read(1), 'little')
else:
f.seek(1,1)
f.seek(1,1)
elif label == 0xff:
# Application Extension
print("Application Extension")
f.seek(1,1)
application = f.read(8)
if application == b'NETSCAPE':
f.seek(5,1)
loop_count = int.from_bytes(f.read(2), 'little')
f.seek(1,1)
else:
raise NotImplementedError("Unimplemented application extension: "
+ ''.join([chr(b) for b in application]))
elif label == 0xfe:
# Comment Extension
comment = b''
while not comment.endswith(b'\0'):
byte = f.read(1)
comment += byte
comment = ''.join([chr(b) for b in comment])
print(comment)
else:
raise NotImplementedError("Unimplemented extension: " + hex(label))
elif separator == 0x2c:
# Image Descriptor
print("Image Descriptor")
image_start_x = int.from_bytes(f.read(2), 'little')
image_start_y = int.from_bytes(f.read(2), 'little')
image_width = int.from_bytes(f.read(2), 'little')
image_height = int.from_bytes(f.read(2), 'little')
# Ignore the packed fields for now
f.seek(1,1)
# Image Data
print("Image Data")
lzw_code_size = int.from_bytes(f.read(1), 'little')
compressed = bytearray()
while True:
block_size = int.from_bytes(f.read(1), 'little')
if block_size == 0:
break
compressed += f.read(block_size)
bitmap = decompress(compressed, lzw_code_size)
bitmaps.append(bitmap)
elif separator == 0x3b:
# Trailer
break
else:
raise RuntimeError("Got an unexpected separator: " + hex(separator))
def decompress(block, min_code_size):
clear_code = 1 << min_code_size
eoi_code = clear_code + 1
cur_code_size = min_code_size + 1
bit_offset = 0
code_stream = []
index_stream = []
table = []
prev_code = None
nextcode = clear_code + 2
while bit_offset < 8*(len(block)-1):
if nextcode == (1 << cur_code_size):
cur_code_size += 1
code = fetch_bits(block, cur_code_size, bit_offset)
#print(code, prev_code)
bit_offset += cur_code_size
if code == clear_code:
# print(table)
# print(len(table))
table = [[i] for i in range(1 << min_code_size)]
table.append([clear_code])
table.append([eoi_code])
# print(table)
nextcode = clear_code + 2
prev_code = None
print("table reset")
continue
elif code == eoi_code:
print("stop")
break
elif code < len(table):
index_stream.append(table[code])
k = [table[code][0]]
if prev_code is not None:
table.append(table[prev_code] + k)
nextcode +=1
elif prev_code is None:
raise ValueError("First code after a reset must be in the table")
else:
k = [table[prev_code][0]]
index_stream.append(table[prev_code] + k)
table.append(table[prev_code] + k)
nextcode +=1
prev_code = code
#nextcode = len(table)
index_stream = flatten(index_stream)
#print(index_stream)
return index_stream
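# Hand-checked example for the bit reader below: with block = bytearray([0b10110100, 0b00000001]),
# fetch_bits(block, 3, 2) reads the 3 bits starting at bit offset 2 (LSB-first within
# the little-endian byte pair), i.e. bits 2..4 of 0b10110100 -> 0b101 == 5.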
def fetch_bits(bytearr, nbits, bit_offset):
byte_offset = bit_offset//8
rem = bit_offset % 8
bits = 0
for i in range(nbits):
bit = (bytearr[byte_offset] | (bytearr[byte_offset+1] << 8)) & (1 << (rem + i))
bits |= bit >> (rem)
return bits
def flatten(items, seqtypes=(list, tuple)):
for i, x in enumerate(items):
while i < len(items) and isinstance(items[i], seqtypes):
items[i:i+1] = items[i]
return items
| 39.747368 | 105 | 0.552039 | [
"MIT"
] | sajattack/Adafruit_CircuitPython_ImageLoad | adafruit_imageload/gif/__init__.py | 7,552 | Python |
import math
import ctypes
import pyglet
pyglet.options["shadow_window"] = False
pyglet.options["debug_gl"] = False
import pyglet.gl as gl
import matrix
import shader
import camera
import block_type
import texture_manager
class Window(pyglet.window.Window):
def __init__(self, **args):
super().__init__(**args)
# create blocks
self.texture_manager = texture_manager.Texture_manager(16, 16, 256)
self.cobblestone = block_type.Block_type(self.texture_manager, "cobblestone", {"all": "cobblestone"})
self.grass = block_type.Block_type(self.texture_manager, "grass", {"top": "grass", "bottom": "dirt", "sides": "grass_side"})
self.dirt = block_type.Block_type(self.texture_manager, "dirt", {"all": "dirt"})
self.stone = block_type.Block_type(self.texture_manager, "stone", {"all": "stone"})
self.sand = block_type.Block_type(self.texture_manager, "sand", {"all": "sand"})
self.planks = block_type.Block_type(self.texture_manager, "planks", {"all": "planks"})
self.log = block_type.Block_type(self.texture_manager, "log", {"top": "log_top", "bottom": "log_top", "sides": "log_side"})
self.texture_manager.generate_mipmaps()
# create vertex array object
self.vao = gl.GLuint(0)
gl.glGenVertexArrays(1, ctypes.byref(self.vao))
gl.glBindVertexArray(self.vao)
# create vertex position vbo
self.vertex_position_vbo = gl.GLuint(0)
gl.glGenBuffers(1, ctypes.byref(self.vertex_position_vbo))
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertex_position_vbo)
gl.glBufferData(
gl.GL_ARRAY_BUFFER,
ctypes.sizeof(gl.GLfloat * len(self.grass.vertex_positions)),
(gl.GLfloat * len(self.grass.vertex_positions)) (*self.grass.vertex_positions),
gl.GL_STATIC_DRAW)
gl.glVertexAttribPointer(0, 3, gl.GL_FLOAT, gl.GL_FALSE, 0, 0)
gl.glEnableVertexAttribArray(0)
# create tex coord vbo
self.tex_coord_vbo = gl.GLuint(0)
gl.glGenBuffers(1, ctypes.byref(self.tex_coord_vbo))
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.tex_coord_vbo)
gl.glBufferData(
gl.GL_ARRAY_BUFFER,
ctypes.sizeof(gl.GLfloat * len(self.grass.tex_coords)),
(gl.GLfloat * len(self.grass.tex_coords)) (*self.grass.tex_coords),
gl.GL_STATIC_DRAW)
gl.glVertexAttribPointer(1, 3, gl.GL_FLOAT, gl.GL_FALSE, 0, 0)
gl.glEnableVertexAttribArray(1)
# create shading value vbo
self.shading_value_vbo = gl.GLuint(0)
gl.glGenBuffers(1, ctypes.byref(self.shading_value_vbo))
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.shading_value_vbo)
gl.glBufferData(
gl.GL_ARRAY_BUFFER,
ctypes.sizeof(gl.GLfloat * len(self.grass.shading_values)),
(gl.GLfloat * len(self.grass.shading_values)) (*self.grass.shading_values),
gl.GL_STATIC_DRAW)
gl.glVertexAttribPointer(2, 1, gl.GL_FLOAT, gl.GL_FALSE, 0, 0)
gl.glEnableVertexAttribArray(2)
# create index buffer object
self.ibo = gl.GLuint(0)
gl.glGenBuffers(1, self.ibo)
gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ibo)
gl.glBufferData(
gl.GL_ELEMENT_ARRAY_BUFFER,
ctypes.sizeof(gl.GLuint * len(self.grass.indices)),
(gl.GLuint * len(self.grass.indices)) (*self.grass.indices),
gl.GL_STATIC_DRAW)
# create shader
self.shader = shader.Shader("vert.glsl", "frag.glsl")
self.shader_sampler_location = self.shader.find_uniform(b"texture_array_sampler")
self.shader.use()
# pyglet stuff
pyglet.clock.schedule_interval(self.update, 1.0 / 60)
self.mouse_captured = False
# camera stuff
self.camera = camera.Camera(self.shader, self.width, self.height)
def update(self, delta_time):
if not self.mouse_captured:
self.camera.input = [0, 0, 0]
self.camera.update_camera(delta_time)
def on_draw(self):
self.camera.update_matrices()
# bind textures
gl.glActiveTexture(gl.GL_TEXTURE0)
gl.glBindTexture(gl.GL_TEXTURE_2D_ARRAY, self.texture_manager.texture_array)
gl.glUniform1i(self.shader_sampler_location, 0)
# draw stuff
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glClearColor(0.0, 0.0, 0.0, 1.0)
self.clear()
gl.glDrawElements(
gl.GL_TRIANGLES,
len(self.grass.indices),
gl.GL_UNSIGNED_INT,
None)
# input functions
def on_resize(self, width, height):
print(f"Resize {width} * {height}")
gl.glViewport(0, 0, width, height)
self.camera.width = width
self.camera.height = height
def on_mouse_press(self, x, y, button, modifiers):
self.mouse_captured = not self.mouse_captured
self.set_exclusive_mouse(self.mouse_captured)
def on_mouse_motion(self, x, y, delta_x, delta_y):
if self.mouse_captured:
sensitivity = 0.004
self.camera.rotation[0] -= delta_x * sensitivity
self.camera.rotation[1] += delta_y * sensitivity
self.camera.rotation[1] = max(-math.tau / 4, min(math.tau / 4, self.camera.rotation[1]))
def on_key_press(self, key, modifiers):
if not self.mouse_captured:
return
if key == pyglet.window.key.D: self.camera.input[0] += 1
elif key == pyglet.window.key.A: self.camera.input[0] -= 1
elif key == pyglet.window.key.W: self.camera.input[2] += 1
elif key == pyglet.window.key.S: self.camera.input[2] -= 1
elif key == pyglet.window.key.SPACE : self.camera.input[1] += 1
elif key == pyglet.window.key.LSHIFT: self.camera.input[1] -= 1
def on_key_release(self, key, modifiers):
if not self.mouse_captured:
return
if key == pyglet.window.key.D: self.camera.input[0] -= 1
elif key == pyglet.window.key.A: self.camera.input[0] += 1
elif key == pyglet.window.key.W: self.camera.input[2] -= 1
elif key == pyglet.window.key.S: self.camera.input[2] += 1
elif key == pyglet.window.key.SPACE : self.camera.input[1] -= 1
elif key == pyglet.window.key.LSHIFT: self.camera.input[1] += 1
class Game:
def __init__(self):
self.config = gl.Config(major_version = 3, depth_size = 16)
self.window = Window(config = self.config, width = 800, height = 600, caption = "Minecraft clone", resizable = True, vsync = False)
def run(self):
pyglet.app.run()
if __name__ == "__main__":
game = Game()
game.run()
| 30.126263 | 133 | 0.718692 | [
"MIT"
] | StartForKillerMC/python-minecraft-clone | episode-7/main.py | 5,965 | Python |
from __future__ import print_function
from exodus import BaseMigration
class Migration(BaseMigration):
version = '2015_10_10'
def can_migrate_database(self, adapter):
        # default to '' so the comparison also works on Python 3, where
        # comparing a str against None raises TypeError
        return self.version > adapter.db.get('version', '')
def migrate_database(self, adapter):
# migrate the keys
adapter.db['c'] = adapter.db['a']
del adapter.db['a']
adapter.db['version'] = self.version
| 26.5 | 61 | 0.67217 | [
"BSD-2-Clause"
] | adamlwgriffiths/exodus | tests/jaweson_migrations/2015_10_10_move_keys.py | 424 | Python |
from flask import Flask, request, jsonify, send_from_directory
import engine
app = Flask(__name__)
@app.route('/api/texts')
def texts():
    return send_from_directory('i18n', 'ui.de.json')
@app.route('/api/codenames')
def codenames():
return jsonify(engine.codenames())
@app.route('/api/ready')
def ready():
return jsonify(engine.ready())
@app.route('/api/clue', methods=['POST'])
def clue():
content = request.json
return jsonify(engine.clue(
our_agents=content['ourAgents'],
assassin=content['assassin'],
previous_clues=content['previousClues'],
min_related=content['minRelated'],
max_related=content['maxRelated']
))
@app.route('/api/guess', methods=['POST'])
def guess():
content = request.json
return jsonify(engine.guess(
codenames=content['codenames'],
word=content['word'],
number=content['number']
))
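# A minimal sketch of exercising the clue endpoint once the app is running
# (host and port are hypothetical; `flask run` serves on localhost:5000 by default):
#
#   curl -X POST http://localhost:5000/api/clue \
#        -H 'Content-Type: application/json' \
#        -d '{"ourAgents": ["alpha", "beta"], "assassin": "gamma",
#             "previousClues": [], "minRelated": 1, "maxRelated": 3}'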
| 24.918919 | 62 | 0.649675 | [
"MIT"
] | alimfeld/codenames | server/server.py | 922 | Python |
import os
import re
def gen_sitemap(main_site, md_file):
pattern = re.compile(r': (.*?).md', re.S)
res = []
with open(md_file) as md:
for line in md.readlines():
line = str(line)
cur_urls = re.findall(pattern, line)
if len(cur_urls) > 0:
if cur_urls[0] == '/':
continue
res.append(main_site + cur_urls[0])
return res
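# Example (hand-checked): a mkdocs nav entry like "  - Intro: android/intro.md" matches
# the pattern with group "android/intro", so gen_sitemap emits
# main_site + "android/intro", e.g. https://www.an.rustfisher.com/android/intro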
if __name__ == '__main__':
print("生成wiki站的sitemap")
site_map = gen_sitemap('https://www.an.rustfisher.com/',
'/Users/rustfisher/Desktop/ws/wiki-ws/mk-android-wiki-proj/mk-an-wiki/mkdocs.yml')
print(len(site_map))
sitemap_file = 'a-sp.txt'
if os.path.exists(sitemap_file):
os.remove(sitemap_file)
with open(sitemap_file, 'w') as s:
for url in site_map:
s.write(url)
s.write('\n')
| 27.424242 | 109 | 0.550276 | [
"MIT"
] | RustFisher/python-playground | genmenu/gen_wiki_sitemap.py | 913 | Python |
from django.contrib.auth import get_user_model
User = get_user_model()
superuser_username = 'admin'
superuser_email = '[email protected]'
superuser_password = 'admin_test'
try:
User.objects.get(username=superuser_username)
except User.DoesNotExist:
User.objects.create_superuser(
superuser_username, superuser_email, superuser_password
)
| 24.266667 | 63 | 0.785714 | [
"MIT"
] | NeZanyat/django-project-starter | service/scripts/create_superuser.py | 364 | Python |
from django.db import models
from django.conf import settings
from django_extensions.db.models import TimeStampedModel
from djcommerce.utils import get_address_model
Address = get_address_model()
class Profile(TimeStampedModel):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete = models.CASCADE
)
addresses = models.ManyToManyField(Address)
class Meta:
abstract = False
if hasattr(settings,"PROFILE_MODEL"):
abstract = True
| 23.904762 | 56 | 0.723108 | [
"MIT"
] | tdsprogramming/djcommerce | djcommerce/models/profile.py | 502 | Python |
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
import sys
import tempfile
import unittest
from glob import glob
import nibabel as nib
import numpy as np
import torch
import monai
from monai.data import create_test_image_2d
from monai.engines import GanTrainer
from monai.engines.utils import GanKeys as Keys
from monai.handlers import CheckpointSaver, StatsHandler, TensorBoardStatsHandler
from monai.networks import normal_init
from monai.networks.nets import Discriminator, Generator
from monai.transforms import AsChannelFirstd, Compose, LoadImaged, RandFlipd, ScaleIntensityd, ToTensord
from monai.utils import set_determinism
from tests.utils import DistTestCase, TimedCall, skip_if_quick
def run_training_test(root_dir, device="cuda:0"):
real_images = sorted(glob(os.path.join(root_dir, "img*.nii.gz")))
train_files = [{"reals": img} for img in zip(real_images)]
# prepare real data
train_transforms = Compose(
[
LoadImaged(keys=["reals"]),
AsChannelFirstd(keys=["reals"]),
ScaleIntensityd(keys=["reals"]),
RandFlipd(keys=["reals"], prob=0.5),
ToTensord(keys=["reals"]),
]
)
train_ds = monai.data.CacheDataset(data=train_files, transform=train_transforms, cache_rate=0.5)
train_loader = monai.data.DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4)
learning_rate = 2e-4
betas = (0.5, 0.999)
real_label = 1
fake_label = 0
# create discriminator
disc_net = Discriminator(
in_shape=(1, 64, 64), channels=(8, 16, 32, 64, 1), strides=(2, 2, 2, 2, 1), num_res_units=1, kernel_size=5
).to(device)
disc_net.apply(normal_init)
disc_opt = torch.optim.Adam(disc_net.parameters(), learning_rate, betas=betas)
disc_loss_criterion = torch.nn.BCELoss()
def discriminator_loss(gen_images, real_images):
real = real_images.new_full((real_images.shape[0], 1), real_label)
gen = gen_images.new_full((gen_images.shape[0], 1), fake_label)
realloss = disc_loss_criterion(disc_net(real_images), real)
genloss = disc_loss_criterion(disc_net(gen_images.detach()), gen)
return torch.div(torch.add(realloss, genloss), 2)
# create generator
latent_size = 64
gen_net = Generator(
latent_shape=latent_size, start_shape=(latent_size, 8, 8), channels=[32, 16, 8, 1], strides=[2, 2, 2, 1]
)
gen_net.apply(normal_init)
gen_net.conv.add_module("activation", torch.nn.Sigmoid())
gen_net = gen_net.to(device)
gen_opt = torch.optim.Adam(gen_net.parameters(), learning_rate, betas=betas)
gen_loss_criterion = torch.nn.BCELoss()
def generator_loss(gen_images):
output = disc_net(gen_images)
cats = output.new_full(output.shape, real_label)
return gen_loss_criterion(output, cats)
key_train_metric = None
train_handlers = [
StatsHandler(
name="training_loss", output_transform=lambda x: {Keys.GLOSS: x[Keys.GLOSS], Keys.DLOSS: x[Keys.DLOSS]}
),
TensorBoardStatsHandler(
log_dir=root_dir,
tag_name="training_loss",
output_transform=lambda x: {Keys.GLOSS: x[Keys.GLOSS], Keys.DLOSS: x[Keys.DLOSS]},
),
CheckpointSaver(
save_dir=root_dir, save_dict={"g_net": gen_net, "d_net": disc_net}, save_interval=2, epoch_level=True
),
]
disc_train_steps = 2
num_epochs = 5
trainer = GanTrainer(
device,
num_epochs,
train_loader,
gen_net,
gen_opt,
generator_loss,
disc_net,
disc_opt,
discriminator_loss,
d_train_steps=disc_train_steps,
latent_shape=latent_size,
key_train_metric=key_train_metric,
train_handlers=train_handlers,
)
trainer.run()
return trainer.state
@skip_if_quick
class IntegrationWorkflowsGAN(DistTestCase):
def setUp(self):
set_determinism(seed=0)
self.data_dir = tempfile.mkdtemp()
for i in range(40):
im, _ = create_test_image_2d(64, 64, num_objs=3, rad_max=14, num_seg_classes=1, channel_dim=-1)
n = nib.Nifti1Image(im, np.eye(4))
nib.save(n, os.path.join(self.data_dir, f"img{i:d}.nii.gz"))
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu:0")
monai.config.print_config()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def tearDown(self):
set_determinism(seed=None)
shutil.rmtree(self.data_dir)
@TimedCall(seconds=200, daemon=False)
def test_training(self):
torch.manual_seed(0)
finish_state = run_training_test(self.data_dir, device=self.device)
# assert GAN training finished
self.assertEqual(finish_state.iteration, 100)
self.assertEqual(finish_state.epoch, 5)
if __name__ == "__main__":
unittest.main()
| 34.3625 | 115 | 0.684613 | [
"Apache-2.0"
] | Borda/MONAI | tests/test_integration_workflows_gan.py | 5,498 | Python |
import logging
import urllib.request
from datetime import datetime
from multiprocessing import Manager, Value
from multiprocessing.pool import ThreadPool
class EntryPoint:
Log = logging.getLogger(__name__)
def __init__(self):
self.__total_size = Value('i', 0)
self.__sizes_by_file = Manager().dict()
def main(self):
urls = ['https://code.jquery.com/jquery-git.js',
'https://code.jquery.com/jquery-3.1.0.js',
'https://code.jquery.com/jquery-3.0.0.js',
'https://code.jquery.com/jquery-2.2.0.js',
'https://code.jquery.com/jquery-2.1.0.js',
'https://code.jquery.com/jquery-2.0.0.js',
'https://code.jquery.com/jquery-1.12.0.js',
'https://code.jquery.com/jquery-1.11.0.js',
'https://code.jquery.com/jquery-1.10.0.js',
'https://code.jquery.com/jquery-1.9.0.js',
'https://code.jquery.com/jquery-1.7.0.js',
'https://code.jquery.com/jquery-1.6.js',
'https://code.jquery.com/jquery-1.5.js',
'https://code.jquery.com/jquery-1.4.js',
'https://code.jquery.com/jquery-1.3.js',
'https://code.jquery.com/jquery-1.2.js',
'https://code.jquery.com/jquery-1.1.js',
'https://code.jquery.com/jquery-1.0.js']
self.__compute_serially(urls)
self.__compute_with_threadpool(urls)
def __compute_serially(self, urls):
start_time = datetime.utcnow()
sizes_by_file = dict()
for url in urls:
sizes_by_file[url] = self.__get_size_of_file(url)
self.Log.info('Total size of all {0} URLs: {1}'.format(len(urls), sum(sizes_by_file.values())))
time_diff = datetime.utcnow() - start_time
self.Log.info("Serial version took: {0}".format(self.get_timespan(time_diff.seconds)))
def __compute_with_threadpool(self, urls):
start_time = datetime.utcnow()
pool = ThreadPool(processes=8)
pool.map(self.__get_size_of_file_in_parallel, urls)
self.Log.info('Total size of all {0} URLs: {1}'.format(len(urls), sum(self.__sizes_by_file.values())))
time_diff = datetime.utcnow() - start_time
self.Log.info("Threadpool version took: {0}".format(self.get_timespan(time_diff.seconds)))
def __get_size_of_file_in_parallel(self, url):
self.__sizes_by_file[url] = self.__get_size_of_file(url)
# with self.__total_size.get_lock():
# self.__total_size.value += self.__get_size_of_file(url)
@staticmethod
def __get_size_of_file(url):
with urllib.request.urlopen(url) as f:
contents = f.read()
return len(contents)
@staticmethod
def get_timespan(seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%d:%02d:%02d" % (h, m, s)
def setup_logging():
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
logger = logging.StreamHandler()
logger.setFormatter(logging.Formatter('%(asctime)s %(levelname)s - [%(thread)d] %(name)s - %(message)s'))
root_logger.addHandler(logger)
def main():
setup_logging()
log = logging.getLogger()
try:
EntryPoint().main()
except Exception as e:
log.exception(e)
if __name__ == '__main__':
main()
| 35.383838 | 111 | 0.591493 | [
"MIT"
] | russcollier/SamplesAndNuggets | python/threadpool_example.py | 3,503 | Python |
from typing import List, Tuple, Union
import numpy as np
import torch
import pytorch_lightning as pl
def calc_area(bbox: np.ndarray):
return (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
def calc_bbox_overlap_union_iou(pred: np.ndarray or None, teacher: np.ndarray) -> Tuple[float, float, float]:
"""
:param pred: ndarray (4, )
:param teacher: ndarray (4, )
:return: overlap, union, iou
"""
teacher_area = (teacher[2] - teacher[0]) * (teacher[3] - teacher[1])
if pred is None:
return 0.0, teacher_area, 0.0
pred_area = (pred[2] - pred[0]) * (pred[3] - pred[1])
intersection_width = np.maximum(np.minimum(pred[2], teacher[2]) - np.maximum(pred[0], teacher[0]), 0)
intersection_height = np.maximum(np.minimum(pred[3], teacher[3]) - np.maximum(pred[1], teacher[1]), 0)
overlap = intersection_width * intersection_height
union = teacher_area + pred_area - overlap
iou = overlap / union
return overlap, union, iou
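# Hand-checked example: for pred = [0, 0, 2, 2] and teacher = [1, 1, 3, 3]
# (xmin, ymin, xmax, ymax), the intersection is a 1x1 region, so the function
# returns overlap = 1, union = 4 + 4 - 1 = 7, and iou = 1/7.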
class DetectionIoU(pl.metrics.Metric):
def __init__(self, n_classes: int, by_classes: bool = False):
super().__init__(compute_on_step=False)
self._n_classes = n_classes
self._by_classes = by_classes
self.add_state("image_count_by_classes", default=torch.tensor([0. for _ in range(n_classes)]),
dist_reduce_fx="sum")
self.add_state("total_iou_by_classes", default=torch.tensor([0. for _ in range(n_classes)]),
dist_reduce_fx="sum")
def update(self, preds: List[np.ndarray], targets: Union[np.ndarray, torch.Tensor]) -> None:
"""
:param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))
:param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))
:return:
"""
targets = targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets
        # Group predictions by class up front, since scanning every prediction for each annotation is slow
preds_by_class = []
for pred_bboxes in preds:
pred_by_class = [[] for _ in range(self._n_classes)]
for pred_bbox in pred_bboxes:
pred_by_class[int(pred_bbox[4])].append(pred_bbox)
preds_by_class.append(pred_by_class)
for i in range(targets.shape[0]): # Explore every batch.
bbox_annotations = targets[i, :, :]
# Exclude invalid label annotation.
bbox_annotations = bbox_annotations[bbox_annotations[:, 4] >= 0]
pred_by_class = preds_by_class[i]
"""
1画像でラベルごとに計算.
ラベルごとの面積合計/overlapを計算
1画像ごとにIoU算出、最終的に画像平均を算出
"""
total_area_by_classes = [0 for _ in range(self._n_classes)]
total_overlap_by_classes = [0 for _ in range(self._n_classes)]
is_label_appeared = [False for _ in range(self._n_classes)]
for bbox_annotation in bbox_annotations:
label = int(bbox_annotation[4])
total_area_by_classes[label] += calc_area(bbox_annotation)
pred_bboxes = pred_by_class[label]
if pred_bboxes is None or len(pred_bboxes) == 0:
continue
# Calculate area and overlap by class.
for pred_bbox in pred_bboxes:
overlap, _, _ = calc_bbox_overlap_union_iou(pred_bbox, bbox_annotation)
total_overlap_by_classes[label] += overlap
if is_label_appeared[label]:
continue
total_area_by_classes[label] += calc_area(pred_bbox)
is_label_appeared[label] = True
for label in range(self._n_classes):
# Not exist label in this data.
if total_area_by_classes[label] <= 0:
continue
self.total_iou_by_classes[label] += total_overlap_by_classes[label] / (
total_area_by_classes[label] - total_overlap_by_classes[label])
self.image_count_by_classes[label] += 1
def compute(self):
epsilon = 1e-8
iou_by_classes = self.total_iou_by_classes / (self.image_count_by_classes + epsilon)
if self._by_classes:
return iou_by_classes
return torch.mean(iou_by_classes)
class RecallPrecision(pl.metrics.Metric):
def __init__(self, n_classes: int, by_classes: bool = False):
super().__init__(compute_on_step=False)
self._n_classes = n_classes
self._by_classes = by_classes
self.add_state("tp_by_classes", default=torch.tensor([0 for _ in range(n_classes)]), dist_reduce_fx="sum")
self.add_state("fp_by_classes", default=torch.tensor([0 for _ in range(n_classes)]), dist_reduce_fx="sum")
self.add_state("fn_by_classes", default=torch.tensor([0 for _ in range(n_classes)]), dist_reduce_fx="sum")
def update(self, preds: List[np.ndarray], targets: Union[np.ndarray, torch.Tensor]) -> None:
"""
:param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))
:param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))
:return:
"""
targets = targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets
        # Group predictions by class up front, since scanning every prediction for each annotation is slow
preds_by_class = []
for pred_bboxes in preds:
pred_by_class = [[] for _ in range(self._n_classes)]
for pred_bbox in pred_bboxes:
pred_by_class[int(pred_bbox[4])].append(pred_bbox)
preds_by_class.append(pred_by_class)
for i in range(targets.shape[0]):
bbox_annotations = targets[i, :, :]
# Exclude invalid label annotation.
bbox_annotations = bbox_annotations[bbox_annotations[:, 4] >= 0]
pred_by_class = preds_by_class[i]
applied_bbox_count_by_classes = [0 for _ in range(self._n_classes)]
for bbox_annotation in bbox_annotations:
label = int(bbox_annotation[4])
pred_bboxes = pred_by_class[label]
if pred_bboxes is None or len(pred_bboxes) == 0:
self.fn_by_classes[label] += 1
continue
# Explore max iou of bbox_annotation
is_matched = False
for pred_bbox in pred_bboxes:
overlap, union, iou = calc_bbox_overlap_union_iou(pred_bbox, bbox_annotation)
if iou >= 0.5:
applied_bbox_count_by_classes[label] += 1
self.tp_by_classes[label] += 1
is_matched = True
break
if not is_matched:
self.fn_by_classes[label] += 1
for label in range(self._n_classes):
self.fp_by_classes[label] += len(pred_by_class[label]) - applied_bbox_count_by_classes[label]
def compute(self):
epsilon = 1e-8
recall = self.tp_by_classes / (self.tp_by_classes + self.fn_by_classes + epsilon)
precision = self.tp_by_classes / (self.tp_by_classes + self.fp_by_classes + epsilon)
f_score = 2. * recall * precision / (recall + precision + epsilon)
if self._by_classes:
return recall, precision, f_score
return torch.mean(recall), torch.mean(precision), torch.mean(f_score)
class MeanAveragePrecision(pl.metrics.Metric):
def __init__(self, n_classes: int, by_classes=False):
super().__init__(compute_on_step=False)
self._n_classes = n_classes
# TODO want to implement using add_state
self.fp_list_by_classes = [[] for _ in range(n_classes)]
self.tp_list_by_classes = [[] for _ in range(n_classes)]
self.score_list_by_classes = [[] for _ in range(n_classes)]
self.num_annotations_by_classes = [0 for _ in range(n_classes)]
# self.add_state("fp_list_by_classes", default=[[] for _ in range(n_classes)], dist_reduce_fx="cat")
# self.add_state("tp_list_by_classes", default=[[] for _ in range(n_classes)], dist_reduce_fx="cat")
# self.add_state("score_list_by_classes", default=[[] for _ in range(n_classes)], dist_reduce_fx="cat")
# self.add_state("num_annotations_by_classes", default=[0 for _ in range(n_classes)], dist_reduce_fx="cat")
self._by_classes = by_classes
def update(self, preds: List[np.ndarray], targets: Union[np.ndarray, torch.Tensor]) -> None:
"""
:param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))
:param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))
:return:
"""
targets = targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets
for i in range(len(preds)):
pred_bboxes, target_bboxes = preds[i], targets[i]
# exclude invalid annotations.
target_bboxes = target_bboxes[target_bboxes[:, 4] >= 0]
self._update_num_annotations(target_bboxes)
self._update_tp_fp_score(pred_bboxes, target_bboxes)
def compute(self):
ap_by_classes = [0 for _ in range(self._n_classes)]
for label in range(self._n_classes):
num_annotations = self.num_annotations_by_classes[label]
tp_list, fp_list = np.array(self.tp_list_by_classes[label]), np.array(self.fp_list_by_classes[label])
scores = np.array(self.score_list_by_classes[label])
indices = np.argsort(-scores)
# sort by score
tp_list, fp_list = tp_list[indices], fp_list[indices]
# cumulative sum
tp_list, fp_list = np.cumsum(tp_list), np.cumsum(fp_list)
if num_annotations == 0:
ap_by_classes[label] = 0
continue
recall_curve = tp_list / num_annotations
precision_curve = tp_list / np.maximum(tp_list + fp_list, np.finfo(np.float64).eps)
ap_by_classes[label] = self._compute_average_precision(recall_curve, precision_curve)
return ap_by_classes if self._by_classes else sum(ap_by_classes) / len(ap_by_classes)
def _update_tp_fp_score(self, pred_bboxes: np.ndarray, target_bboxes: np.ndarray):
"""
:param pred_bboxes: (N, 6(xmin, ymin, xmax, ymax, class, score))
:param target_bboxes: (N, 5(xmin, ymin, xmax, ymax, class))
"""
detected_indices = []
for i in range(pred_bboxes.shape[0]):
pred_label, pred_score = int(pred_bboxes[i][4]), pred_bboxes[i][5]
matched = False
for j in filter(lambda k: int(target_bboxes[k][4]) == pred_label and k not in detected_indices,
range(target_bboxes.shape[0])):
overlap, union, iou = calc_bbox_overlap_union_iou(pred_bboxes[i], target_bboxes[j])
if iou >= 0.5:
detected_indices.append(j)
self.fp_list_by_classes[pred_label].append(0)
self.tp_list_by_classes[pred_label].append(1)
matched = True
break
if not matched:
self.fp_list_by_classes[pred_label].append(1)
self.tp_list_by_classes[pred_label].append(0)
self.score_list_by_classes[pred_label].append(pred_score)
def _update_num_annotations(self, target_bboxes: np.ndarray):
"""
:param target_bboxes: (N, 5(xmin, ymin, xmax, ymax, class))
"""
counts = list(map(lambda i: np.count_nonzero(target_bboxes[:, 4] == i), range(self._n_classes)))
self.num_annotations_by_classes = list(
map(lambda i: counts[i] + self.num_annotations_by_classes[i], range(self._n_classes)))
def _compute_average_precision(self, recall_curve: np.ndarray, precision_curve: np.ndarray):
# Reference by https://github.com/toandaominh1997/EfficientDet.Pytorch/blob/master/eval.py
assert recall_curve.ndim == 1 and precision_curve.ndim == 1
# correct AP calculation
# first append sentinel values at the end
mean_recall = np.concatenate(([0.], recall_curve, [1.]))
mean_precision = np.concatenate(([0.], precision_curve, [0.]))
# compute the precision envelope
for i in range(mean_precision.size - 1, 0, -1):
mean_precision[i - 1] = np.maximum(mean_precision[i - 1], mean_precision[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mean_recall[1:] != mean_recall[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mean_recall[i + 1] - mean_recall[i]) * mean_precision[i + 1])
return ap
def reset(self):
self.fp_list_by_classes = [[] for _ in range(self._n_classes)]
self.tp_list_by_classes = [[] for _ in range(self._n_classes)]
self.score_list_by_classes = [[] for _ in range(self._n_classes)]
self.num_annotations_by_classes = [0 for _ in range(self._n_classes)]
| 47.589928 | 115 | 0.621391 | [
"MIT"
] | pei223/deepext-with-lightning | deepext_with_lightning/metrics/object_detection.py | 13,400 | Python |
# coding: utf-8
import math
import random
import time
import asyncclick as click
@click.group()
def cli():
"""This script showcases different terminal UI helpers in Click."""
pass
@cli.command()
def colordemo():
"""Demonstrates ANSI color support."""
for color in "red", "green", "blue":
click.echo(click.style("I am colored {}".format(color), fg=color))
click.echo(click.style("I am background colored {}".format(color), bg=color))
@cli.command()
def pager():
"""Demonstrates using the pager."""
lines = []
for x in range(200):
lines.append("{}. Hello World!".format(click.style(str(x), fg="green")))
click.echo_via_pager("\n".join(lines))
@cli.command()
@click.option(
"--count",
default=8000,
type=click.IntRange(1, 100000),
help="The number of items to process.",
)
def progress(count):
"""Demonstrates the progress bar."""
items = range(count)
def process_slowly(item):
time.sleep(0.002 * random.random())
def filter(items):
for item in items:
if random.random() > 0.3:
yield item
with click.progressbar(
items, label="Processing accounts", fill_char=click.style("#", fg="green")
) as bar:
for item in bar:
process_slowly(item)
def show_item(item):
if item is not None:
return "Item #{}".format(item)
with click.progressbar(
filter(items),
label="Committing transaction",
fill_char=click.style("#", fg="yellow"),
item_show_func=show_item,
) as bar:
for item in bar:
process_slowly(item)
with click.progressbar(
length=count,
label="Counting",
bar_template="%(label)s %(bar)s | %(info)s",
fill_char=click.style(u"█", fg="cyan"),
empty_char=" ",
) as bar:
for item in bar:
process_slowly(item)
with click.progressbar(
length=count,
width=0,
show_percent=False,
show_eta=False,
fill_char=click.style("#", fg="magenta"),
) as bar:
for item in bar:
process_slowly(item)
# 'Non-linear progress bar'
steps = [math.exp(x * 1.0 / 20) - 1 for x in range(20)]
count = int(sum(steps))
with click.progressbar(
length=count,
show_percent=False,
label="Slowing progress bar",
fill_char=click.style(u"█", fg="green"),
) as bar:
for item in steps:
time.sleep(item)
bar.update(item)
@cli.command()
@click.argument("url")
def open(url):
"""Opens a file or URL In the default application."""
click.launch(url)
@cli.command()
@click.argument("url")
def locate(url):
"""Opens a file or URL In the default application."""
click.launch(url, locate=True)
@cli.command()
def edit():
"""Opens an editor with some text in it."""
MARKER = "# Everything below is ignored\n"
message = click.edit("\n\n{}".format(MARKER))
if message is not None:
msg = message.split(MARKER, 1)[0].rstrip("\n")
if not msg:
click.echo("Empty message!")
else:
click.echo("Message:\n{}".format(msg))
else:
click.echo("You did not enter anything!")
@cli.command()
def clear():
"""Clears the entire screen."""
click.clear()
@cli.command()
def pause():
"""Waits for the user to press a button."""
click.pause()
@cli.command()
def menu():
"""Shows a simple menu."""
menu = "main"
while 1:
if menu == "main":
click.echo("Main menu:")
click.echo(" d: debug menu")
click.echo(" q: quit")
char = click.getchar()
if char == "d":
menu = "debug"
elif char == "q":
menu = "quit"
else:
click.echo("Invalid input")
elif menu == "debug":
click.echo("Debug menu")
click.echo(" b: back")
char = click.getchar()
if char == "b":
menu = "main"
else:
click.echo("Invalid input")
elif menu == "quit":
return
| 24.695906 | 85 | 0.550556 | [
"BSD-3-Clause"
] | D4N/asyncclick | examples/termui/termui.py | 4,227 | Python |
########################## FWMAV Simulation #########################
# Version 0.3
# Fan Fei Feb 2019
# Direct motor driven flapping wing MAV simulation
#######################################################################
import gym
import flappy
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common.vec_env import SubprocVecEnv
from stable_baselines.common import set_global_seeds
from flappy.envs.fwmav.controllers.arc_xy_arc_z import ARCController
from flappy.envs.fwmav.controllers.pid_controller import PIDController
import time
import argparse
import importlib
import numpy as np
def make_env(env_id, rank, seed=0, random_init = True, randomize_sim = True, phantom_sensor = False):
def _init():
env = gym.make(env_id)
env.config(random_init, randomize_sim, phantom_sensor)
if rank == 0:
env.enable_visualization()
env.enable_print()
env.seed(seed + rank)
return env
# set_global_seeds(seed)
return _init
class LazyModel:
def __init__(self,env,model_type):
self.action_lb = env.action_lb
self.action_ub = env.action_ub
self.observation_bound = env.observation_bound
if model_type == 'PID':
self.policy = PIDController(env.sim.dt_c)
elif model_type == 'ARC':
self.policy = ARCController(env.sim.dt_c)
else:
raise Exception('Error')
def predict(self, obs):
action = self.policy.get_action(obs[0]*self.observation_bound)
# scale action from [action_lb, action_ub] to [-1,1]
# since baseline does not support asymmetric action space
normalized_action = (action-self.action_lb)/(self.action_ub - self.action_lb)*2 - 1
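        # e.g. with hypothetical bounds action_lb = -10 and action_ub = 10,
        # an action of 5 maps to (5 - (-10)) / 20 * 2 - 1 = 0.5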
action = np.array([normalized_action])
return action, None
def main(args):
env_id = 'fwmav_hover-v0'
env = DummyVecEnv([make_env(env_id, 0, random_init = args.rand_init, randomize_sim = args.rand_dynamics, phantom_sensor = args.phantom_sensor)])
if args.model_type != 'PID' and args.model_type != 'ARC':
try:
model_cls = getattr(
importlib.import_module('stable_baselines'), args.model_type)
except AttributeError:
print(args.model_type, "Error: wrong model type")
return
try:
model = model_cls.load(args.model_path)
except:
print(args.model_path, "Error: wrong model path")
else:
model = LazyModel(env.envs[0],args.model_type)
obs = env.reset()
while True:
if env.envs[0].is_sim_on == False:
env.envs[0].gui.cv.wait()
elif env.envs[0].is_sim_on:
action, _ = model.predict(obs)
obs, rewards, done, info = env.step(action)
# if done:
# obs = env.reset()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_type', required=True)
parser.add_argument('--model_path')
parser.add_argument(
'--policy_type', const='MlpPolicy', default='MlpPolicy', nargs='?')
parser.add_argument('--rand_init', action='store_true', default=False)
parser.add_argument('--rand_dynamics', action='store_true', default=False)
parser.add_argument('--phantom_sensor', action='store_true', default=False)
args = parser.parse_args()
main(args) | 31.612245 | 145 | 0.714009 | [
"MIT"
] | ArbalestV/flappy | test.py | 3,098 | Python |
ano = input('Enter the year: ')
mes = input('Enter the month: ')
dia = input('Enter the day: ')
print('{}/{}/{}'.format(dia, mes, ano))
print(dia, mes, ano, sep='/')
print(type(ano))
eval(ano)
print(type(eval(ano)))
| 23.111111 | 39 | 0.605769 | [
"MIT"
] | joaorobsonR/algoritmo1 | UNIVESPalgortimo_1/s4aula2.py | 208 | Python |
import os
import sys
__all__ = [
'lexsort','sort', 'argsort','argmin', 'argmax', 'searchsorted']
from pnumpy._pnumpy import getitem, lexsort32, lexsort64
import numpy as np
from numpy import asarray, array, asanyarray
from numpy import concatenate
#array_function_dispatch = functools.partial(
# overrides.array_function_dispatch, module='numpy')
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj), method)(*args, **kwds)
if wrap:
        if not isinstance(result, np.ndarray):
result = asarray(result)
result = wrap(result)
return result
def _wrapfunc(obj, method, *args, **kwds):
bound = getattr(obj, method, None)
if bound is None:
return _wrapit(obj, method, *args, **kwds)
try:
return bound(*args, **kwds)
except TypeError:
# A TypeError occurs if the object does have such a method in its
# class, but its signature is not identical to that of NumPy's. This
# situation has occurred in the case of a downstream library like
# 'pandas'.
#
# Call _wrapit from within the except clause to ensure a potential
# exception has a traceback chain.
return _wrapit(obj, method, *args, **kwds)
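# Example: _wrapfunc([3, 1, 2], 'argsort') falls back to _wrapit because a plain
# list has no .argsort method; _wrapit then converts the list with asarray and
# calls ndarray.argsort on the result.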
def sort(a, axis=-1, kind=None, order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort or radix sort under the covers and, in general,
the actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0.
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
Threading
---------
Up to 8 threads
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
partition : Partial sort.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The four algorithms implemented in NumPy have the following
properties:
=========== ======= ============= ============ ========
kind speed worst case work space stable
=========== ======= ============= ============ ========
'quicksort' 1 O(n^2) 0 no
'heapsort' 3 O(n*log(n)) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'timsort' 2 O(n*log(n)) ~n/2 yes
=========== ======= ============= ============ ========
.. note:: The datatype determines which of 'mergesort' or 'timsort'
is actually used, even if 'mergesort' is specified. User selection
at a finer scale is not currently available.
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Previous to numpy 1.4.0 sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
.. versionadded:: 1.12.0
quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
When sorting does not make enough progress it switches to
`heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
This implementation makes quicksort O(n*log(n)) in the worst case.
'stable' automatically chooses the best stable sorting algorithm
for the data type being sorted.
It, along with 'mergesort' is currently mapped to
`timsort <https://en.wikipedia.org/wiki/Timsort>`_
or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
depending on the data type.
API forward compatibility currently limits the
ability to select the implementation and it is hardwired for the different
data types.
.. versionadded:: 1.17.0
Timsort is added for better performance on already or nearly
sorted data. On random data timsort is almost identical to
mergesort. It is now used for stable sort while quicksort is still the
default sort if none is chosen. For timsort details, refer to
`CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
O(n) sort instead of O(n log n).
.. versionchanged:: 1.18.0
NaT now sorts to the end of arrays for consistency with NaN.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
"""
if axis is None:
# flatten returns (1, N) for np.matrix, so always use the last axis
a = asanyarray(a).flatten()
axis = -1
        try:
            # attempt a parallel in-place sort on the flattened copy
            a.sort(kind=kind, order=order)
            return a
        except Exception:
            pass
else:
a = asanyarray(a).copy(order="K")
# normal numpy code
a.sort(axis=axis, kind=kind, order=order)
return a
def lexsort(*args, **kwargs):
"""
Perform an indirect stable sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
the sort order by multiple columns. The last key in the sequence is used
for the primary sort order, the second-to-last key for the secondary sort
order, and so on. The keys argument must be a sequence of objects that
can be converted to arrays of the same shape. If a 2D array is provided
for the keys argument, it's rows are interpreted as the sorting keys and
sorting is according to the last row, second last row etc.
Parameters
----------
keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
Axis to be indirectly sorted. By default, sort over the last axis.
Returns
-------
indices : (N,) ndarray of ints
Array of indices that sort the keys along the specified axis.
Threading
---------
Up to 8 threads
See Also
--------
argsort : Indirect sort.
ndarray.sort : In-place sort.
sort : Return a sorted copy of an array.
Examples
--------
Sort names: first by surname, then by name.
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
>>> ind = np.lexsort((first_names, surnames))
>>> ind
array([1, 2, 0])
>>> [surnames[i] + ", " + first_names[i] for i in ind]
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
Sort two columns of numbers:
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
>>> ind
array([2, 0, 4, 6, 5, 3, 1])
>>> [(a[i],b[i]) for i in ind]
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
Note that sorting is first according to the elements of ``a``.
Secondary sorting is according to the elements of ``b``.
A normal ``argsort`` would have yielded:
>>> [(a[i],b[i]) for i in np.argsort(a)]
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
Structured arrays are sorted lexically by ``argsort``:
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
... dtype=np.dtype([('x', int), ('y', int)]))
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
array([2, 0, 4, 6, 5, 3, 1])
"""
try:
return lexsort32(*args, **kwargs)
except Exception:
return np.lexsort(*args, **kwargs)
def argsort(a, axis=-1, kind=None, order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort under the covers and, in general, the
actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0.
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified `axis`.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)``
always yields the sorted `a`, irrespective of dimensionality.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
take_along_axis : Apply ``index_array`` from argsort
to an array as if by calling sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
>>> ind
array([[0, 1],
[1, 0]])
>>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
array([[0, 2],
[2, 3]])
>>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
>>> ind
array([[0, 1],
[0, 1]])
>>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
array([[0, 3],
[2, 2]])
Indices of the sorted elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
>>> ind
(array([0, 1, 1, 0]), array([0, 0, 1, 1]))
>>> x[ind] # same as np.sort(x, axis=None)
array([0, 2, 2, 3])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
def _argmax_dispatcher(a, axis=None, out=None):
return (a, out)
def argmax(a, axis=None, out=None):
"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmax to an array as if by calling max.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
Indexes of the maximal elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
>>> ind
(1, 2)
>>> a[ind]
15
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmax(x, axis=-1)
>>> # Same as np.max(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[4],
[3]])
>>> # Same as np.max(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([4, 3])
"""
return _wrapfunc(a, 'argmax', axis=axis, out=out)
def _argmin_dispatcher(a, axis=None, out=None):
return (a, out)
def argmin(a, axis=None, out=None):
"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmin to an array as if by calling min.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmin(a)
0
>>> np.argmin(a, axis=0)
array([0, 0, 0])
>>> np.argmin(a, axis=1)
array([0, 0])
Indices of the minimum elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
>>> ind
(0, 0)
>>> a[ind]
10
>>> b = np.arange(6) + 10
>>> b[4] = 10
>>> b
array([10, 11, 12, 13, 10, 15])
>>> np.argmin(b) # Only the first occurrence is returned.
0
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmin(x, axis=-1)
>>> # Same as np.min(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[2],
[0]])
    >>> # Same as np.min(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([2, 0])
"""
return _wrapfunc(a, 'argmin', axis=axis, out=out)
def _searchsorted_dispatcher(a, v, side=None, sorter=None):
return (a, v, sorter)
def searchsorted(a, v, side='left', sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `a` would be preserved.
Assuming that `a` is sorted:
====== ============================
`side` returned index `i` satisfies
====== ============================
left ``a[i-1] < v <= a[i]``
right ``a[i-1] <= v < a[i]``
====== ============================
Parameters
----------
a : 1-D array_like
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
.. versionadded:: 1.7.0
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
This function uses the same algorithm as the builtin python `bisect.bisect_left`
(``side='left'``) and `bisect.bisect_right` (``side='right'``) functions,
which is also vectorized in the `v` argument.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2])
"""
return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
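# Hypothetical usage note (added by the editor, not in the upstream docstring):
# the `sorter` argument lets you search an unsorted array through its argsort
# indices, returning the insertion point in sorted order, e.g.
#
#   >>> a = np.array([3, 1, 2])
#   >>> np.searchsorted(a, 2, sorter=np.argsort(a))
#   1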
| 33.043887 | 94 | 0.591737 | [
"MIT"
] | Quansight/numpy-threading-extensions | src/pnumpy/sort.py | 21,082 | Python |
#!/usr/bin/env python
"""html2text: Turn HTML into equivalent Markdown-structured text."""
__version__ = "3.200.3"
__author__ = "Aaron Swartz ([email protected])"
__copyright__ = "(C) 2004-2008 Aaron Swartz. GNU GPL 3."
__contributors__ = ["Martin 'Joey' Schulze", "Ricardo Reyes", "Kevin Jay North"]
# TODO:
# Support decoded entities with unifiable.
try:
True
except NameError:
setattr(__builtins__, 'True', 1)
setattr(__builtins__, 'False', 0)
def has_key(x, y):
if hasattr(x, 'has_key'): return x.has_key(y)
else: return y in x
try:
import htmlentitydefs
import urlparse
import HTMLParser
except ImportError: #Python3
import html.entities as htmlentitydefs
import urllib.parse as urlparse
import html.parser as HTMLParser
try: #Python3
import urllib.request as urllib
except:
import urllib
import optparse, re, sys, codecs, types
try: from textwrap import wrap
except: pass
# Use Unicode characters instead of their ascii pseudo-replacements
UNICODE_SNOB = 0
# Escape all special characters. Output is less readable, but avoids corner case formatting issues.
ESCAPE_SNOB = 0
# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = 0
# Wrap long lines at position. 0 for no wrapping. (Requires Python 2.3.)
BODY_WIDTH = 78
# Don't show internal links (href="#local-anchor") -- corresponding link targets
# won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = True
# Use inline, rather than reference, formatting for images and links
INLINE_LINKS = True
# Number of pixels Google indents nested lists
GOOGLE_LIST_INDENT = 36
IGNORE_ANCHORS = False
IGNORE_IMAGES = False
IGNORE_EMPHASIS = False
### Entity Nonsense ###
def name2cp(k):
if k == 'apos': return ord("'")
if hasattr(htmlentitydefs, "name2codepoint"): # requires Python 2.3
return htmlentitydefs.name2codepoint[k]
else:
k = htmlentitydefs.entitydefs[k]
if k.startswith("&#") and k.endswith(";"): return int(k[2:-1]) # not in latin-1
return ord(codecs.latin_1_decode(k)[0])
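# Small illustration (added for clarity, not upstream code): name2cp maps an
# HTML entity name to its Unicode code point, e.g. name2cp('amp') == 38 and
# name2cp('apos') == 39.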
unifiable = {'rsquo':"'", 'lsquo':"'", 'rdquo':'"', 'ldquo':'"',
'copy':'(C)', 'mdash':'--', 'nbsp':' ', 'rarr':'->', 'larr':'<-', 'middot':'*',
'ndash':'-', 'oelig':'oe', 'aelig':'ae',
'agrave':'a', 'aacute':'a', 'acirc':'a', 'atilde':'a', 'auml':'a', 'aring':'a',
'egrave':'e', 'eacute':'e', 'ecirc':'e', 'euml':'e',
'igrave':'i', 'iacute':'i', 'icirc':'i', 'iuml':'i',
'ograve':'o', 'oacute':'o', 'ocirc':'o', 'otilde':'o', 'ouml':'o',
'ugrave':'u', 'uacute':'u', 'ucirc':'u', 'uuml':'u',
'lrm':'', 'rlm':''}
unifiable_n = {}
for k in unifiable.keys():
unifiable_n[name2cp(k)] = unifiable[k]
### End Entity Nonsense ###
def onlywhite(line):
"""Return true if the line does only consist of whitespace characters."""
for c in line:
        if c != ' ' and c != '\t':
return c == ' '
return line
def hn(tag):
if tag[0] == 'h' and len(tag) == 2:
try:
n = int(tag[1])
if n in range(1, 10): return n
except ValueError: return 0
def dumb_property_dict(style):
"""returns a hash of css attributes"""
return dict([(x.strip(), y.strip()) for x, y in [z.split(':', 1) for z in style.split(';') if ':' in z]]);
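# Sketch of the expected mapping (illustrative only, not part of the original
# source):
#   dumb_property_dict("color: red; font-weight: bold")
#   -> {'color': 'red', 'font-weight': 'bold'}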
def dumb_css_parser(data):
"""returns a hash of css selectors, each of which contains a hash of css attributes"""
# remove @import sentences
data += ';'
importIndex = data.find('@import')
while importIndex != -1:
data = data[0:importIndex] + data[data.find(';', importIndex) + 1:]
importIndex = data.find('@import')
    # parse the css. reverted from dictionary comprehension in order to support older pythons
elements = [x.split('{') for x in data.split('}') if '{' in x.strip()]
try:
elements = dict([(a.strip(), dumb_property_dict(b)) for a, b in elements])
except ValueError:
elements = {} # not that important
return elements
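# Illustrative example (added by the editor): a tiny stylesheet parses to a
# nested dict of selectors and attributes,
#   dumb_css_parser(".big { font-size: 20px; font-weight: bold }")
#   -> {'.big': {'font-size': '20px', 'font-weight': 'bold'}}
# @import statements are stripped before parsing, and malformed CSS yields {}.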
def element_style(attrs, style_def, parent_style):
"""returns a hash of the 'final' style attributes of the element"""
style = parent_style.copy()
if 'class' in attrs:
for css_class in attrs['class'].split():
css_style = style_def['.' + css_class]
style.update(css_style)
if 'style' in attrs:
immediate_style = dumb_property_dict(attrs['style'])
style.update(immediate_style)
return style
def google_list_style(style):
"""finds out whether this is an ordered or unordered list"""
if 'list-style-type' in style:
list_style = style['list-style-type']
if list_style in ['disc', 'circle', 'square', 'none']:
return 'ul'
return 'ol'
def google_has_height(style):
"""check if the style of the element has the 'height' attribute explicitly defined"""
if 'height' in style:
return True
return False
def google_text_emphasis(style):
"""return a list of all emphasis modifiers of the element"""
emphasis = []
if 'text-decoration' in style:
emphasis.append(style['text-decoration'])
if 'font-style' in style:
emphasis.append(style['font-style'])
if 'font-weight' in style:
emphasis.append(style['font-weight'])
return emphasis
def google_fixed_width_font(style):
"""check if the css of the current element defines a fixed width font"""
font_family = ''
if 'font-family' in style:
font_family = style['font-family']
if 'Courier New' == font_family or 'Consolas' == font_family:
return True
return False
def list_numbering_start(attrs):
"""extract numbering from list element attributes"""
if 'start' in attrs:
return int(attrs['start']) - 1
else:
return 0
class HTML2Text(HTMLParser.HTMLParser):
def __init__(self, out=None, baseurl=''):
HTMLParser.HTMLParser.__init__(self)
# Config options
self.unicode_snob = UNICODE_SNOB
self.escape_snob = ESCAPE_SNOB
self.links_each_paragraph = LINKS_EACH_PARAGRAPH
self.body_width = BODY_WIDTH
self.skip_internal_links = SKIP_INTERNAL_LINKS
self.inline_links = INLINE_LINKS
self.google_list_indent = GOOGLE_LIST_INDENT
self.ignore_links = IGNORE_ANCHORS
self.ignore_images = IGNORE_IMAGES
self.ignore_emphasis = IGNORE_EMPHASIS
self.google_doc = False
self.ul_item_mark = '*'
self.emphasis_mark = '_'
self.strong_mark = '**'
if out is None:
self.out = self.outtextf
else:
self.out = out
self.outtextlist = [] # empty list to store output characters before they are "joined"
try:
self.outtext = unicode()
except NameError: # Python3
self.outtext = str()
self.quiet = 0
self.p_p = 0 # number of newline character to print before next output
self.outcount = 0
self.start = 1
self.space = 0
self.a = []
self.astack = []
self.maybe_automatic_link = None
self.absolute_url_matcher = re.compile(r'^[a-zA-Z+]+://')
self.acount = 0
self.list = []
self.blockquote = 0
self.pre = 0
self.startpre = 0
self.code = False
self.br_toggle = ''
self.lastWasNL = 0
self.lastWasList = False
self.style = 0
self.style_def = {}
self.tag_stack = []
self.emphasis = 0
self.drop_white_space = 0
self.inheader = False
self.abbr_title = None # current abbreviation definition
self.abbr_data = None # last inner HTML (for abbr being defined)
self.abbr_list = {} # stack of abbreviations to write later
self.baseurl = baseurl
try: del unifiable_n[name2cp('nbsp')]
except KeyError: pass
        unifiable['nbsp'] = '&nbsp_place_holder;'
def feed(self, data):
data = data.replace("</' + 'script>", "</ignore>")
HTMLParser.HTMLParser.feed(self, data)
def handle(self, data):
self.feed(data)
self.feed("")
return self.optwrap(self.close())
def outtextf(self, s):
self.outtextlist.append(s)
if s: self.lastWasNL = s[-1] == '\n'
def close(self):
HTMLParser.HTMLParser.close(self)
self.pbr()
self.o('', 0, 'end')
self.outtext = self.outtext.join(self.outtextlist)
if self.unicode_snob:
nbsp = unichr(name2cp('nbsp'))
else:
nbsp = u' '
        self.outtext = self.outtext.replace(u'&nbsp_place_holder;', nbsp)
return self.outtext
def handle_charref(self, c):
self.o(self.charref(c), 1)
def handle_entityref(self, c):
self.o(self.entityref(c), 1)
def handle_starttag(self, tag, attrs):
self.handle_tag(tag, attrs, 1)
def handle_endtag(self, tag):
self.handle_tag(tag, None, 0)
def previousIndex(self, attrs):
""" returns the index of certain set of attributes (of a link) in the
self.a list
If the set of attributes is not found, returns None
"""
if not has_key(attrs, 'href'): return None
i = -1
for a in self.a:
i += 1
match = 0
if has_key(a, 'href') and a['href'] == attrs['href']:
if has_key(a, 'title') or has_key(attrs, 'title'):
if (has_key(a, 'title') and has_key(attrs, 'title') and
a['title'] == attrs['title']):
match = True
else:
match = True
if match: return i
def drop_last(self, nLetters):
if not self.quiet:
self.outtext = self.outtext[:-nLetters]
def handle_emphasis(self, start, tag_style, parent_style):
"""handles various text emphases"""
tag_emphasis = google_text_emphasis(tag_style)
parent_emphasis = google_text_emphasis(parent_style)
# handle Google's text emphasis
strikethrough = 'line-through' in tag_emphasis and self.hide_strikethrough
bold = 'bold' in tag_emphasis and not 'bold' in parent_emphasis
italic = 'italic' in tag_emphasis and not 'italic' in parent_emphasis
fixed = google_fixed_width_font(tag_style) and not \
google_fixed_width_font(parent_style) and not self.pre
if start:
# crossed-out text must be handled before other attributes
# in order not to output qualifiers unnecessarily
if bold or italic or fixed:
self.emphasis += 1
if strikethrough:
self.quiet += 1
if italic:
self.o(self.emphasis_mark)
self.drop_white_space += 1
if bold:
self.o(self.strong_mark)
self.drop_white_space += 1
if fixed:
self.o('`')
self.drop_white_space += 1
self.code = True
else:
if bold or italic or fixed:
# there must not be whitespace before closing emphasis mark
self.emphasis -= 1
self.space = 0
self.outtext = self.outtext.rstrip()
if fixed:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o('`')
self.code = False
if bold:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(2)
self.drop_white_space -= 1
else:
self.o(self.strong_mark)
if italic:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o(self.emphasis_mark)
# space is only allowed after *all* emphasis marks
if (bold or italic) and not self.emphasis:
self.o(" ")
if strikethrough:
self.quiet -= 1
def handle_tag(self, tag, attrs, start):
#attrs = fixattrs(attrs)
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
if self.google_doc:
# the attrs parameter is empty for a closing tag. in addition, we
# need the attributes of the parent nodes in order to get a
# complete style description for the current element. we assume
# that google docs export well formed html.
parent_style = {}
if start:
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
tag_style = element_style(attrs, self.style_def, parent_style)
self.tag_stack.append((tag, attrs, tag_style))
else:
dummy, attrs, tag_style = self.tag_stack.pop()
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
if hn(tag):
self.p()
if start:
self.inheader = True
self.o(hn(tag)*"#" + ' ')
else:
self.inheader = False
return # prevent redundant emphasis marks on headers
if tag in ['p', 'div']:
if self.google_doc:
if start and google_has_height(tag_style):
self.p()
else:
self.soft_br()
else:
self.p()
if tag == "br" and start: self.o(" \n")
if tag == "hr" and start:
self.p()
self.o("* * *")
self.p()
if tag in ["head", "style", 'script']:
if start: self.quiet += 1
else: self.quiet -= 1
if tag == "style":
if start: self.style += 1
else: self.style -= 1
if tag in ["body"]:
self.quiet = 0 # sites like 9rules.com never close <head>
if tag == "blockquote":
if start:
self.p(); self.o('> ', 0, 1); self.start = 1
self.blockquote += 1
else:
self.blockquote -= 1
self.p()
if tag in ['em', 'i', 'u'] and not self.ignore_emphasis: self.o(self.emphasis_mark)
if tag in ['strong', 'b'] and not self.ignore_emphasis: self.o(self.strong_mark)
if tag in ['del', 'strike', 's']:
if start:
self.o("<"+tag+">")
else:
self.o("</"+tag+">")
if self.google_doc:
if not self.inheader:
# handle some font attributes, but leave headers clean
self.handle_emphasis(start, tag_style, parent_style)
if tag in ["code", "tt"] and not self.pre: self.o('`') #TODO: `` `this` ``
if tag == "abbr":
if start:
self.abbr_title = None
self.abbr_data = ''
if has_key(attrs, 'title'):
self.abbr_title = attrs['title']
else:
if self.abbr_title != None:
self.abbr_list[self.abbr_data] = self.abbr_title
self.abbr_title = None
self.abbr_data = ''
if tag == "a" and not self.ignore_links:
if start:
if has_key(attrs, 'href') and not (self.skip_internal_links and attrs['href'].startswith('#')):
self.astack.append(attrs)
self.maybe_automatic_link = attrs['href']
else:
self.astack.append(None)
else:
if self.astack:
a = self.astack.pop()
if self.maybe_automatic_link:
self.maybe_automatic_link = None
elif a:
if self.inline_links:
self.o("](" + escape_md(a['href']) + ")")
else:
i = self.previousIndex(a)
if i is not None:
a = self.a[i]
else:
self.acount += 1
a['count'] = self.acount
a['outcount'] = self.outcount
self.a.append(a)
self.o("][" + str(a['count']) + "]")
if tag == "img" and start and not self.ignore_images:
if has_key(attrs, 'src'):
attrs['href'] = attrs['src']
alt = attrs.get('alt', '')
self.o("![" + escape_md(alt) + "]")
if self.inline_links:
self.o("(" + escape_md(attrs['href']) + ")")
else:
i = self.previousIndex(attrs)
if i is not None:
attrs = self.a[i]
else:
self.acount += 1
attrs['count'] = self.acount
attrs['outcount'] = self.outcount
self.a.append(attrs)
self.o("[" + str(attrs['count']) + "]")
if tag == 'dl' and start: self.p()
if tag == 'dt' and not start: self.pbr()
if tag == 'dd' and start: self.o(' ')
if tag == 'dd' and not start: self.pbr()
if tag in ["ol", "ul"]:
# Google Docs create sub lists as top level lists
if (not self.list) and (not self.lastWasList):
self.p()
if start:
if self.google_doc:
list_style = google_list_style(tag_style)
else:
list_style = tag
numbering_start = list_numbering_start(attrs)
self.list.append({'name':list_style, 'num':numbering_start})
else:
if self.list: self.list.pop()
self.lastWasList = True
else:
self.lastWasList = False
if tag == 'li':
self.pbr()
if start:
if self.list: li = self.list[-1]
else: li = {'name':'ul', 'num':0}
if self.google_doc:
nest_count = self.google_nest_count(tag_style)
else:
nest_count = len(self.list)
self.o(" " * nest_count) #TODO: line up <ol><li>s > 9 correctly.
if li['name'] == "ul": self.o(self.ul_item_mark + " ")
elif li['name'] == "ol":
li['num'] += 1
self.o(str(li['num'])+". ")
self.start = 1
if tag in ["table", "tr"] and start: self.p()
if tag == 'td': self.pbr()
if tag == "pre":
if start:
self.startpre = 1
self.pre = 1
else:
self.pre = 0
self.p()
def pbr(self):
if self.p_p == 0:
self.p_p = 1
def p(self):
self.p_p = 2
def soft_br(self):
self.pbr()
self.br_toggle = ' '
def o(self, data, puredata=0, force=0):
if self.abbr_data is not None:
self.abbr_data += data
if not self.quiet:
if self.google_doc:
# prevent white space immediately after 'begin emphasis' marks ('**' and '_')
lstripped_data = data.lstrip()
if self.drop_white_space and not (self.pre or self.code):
data = lstripped_data
if lstripped_data != '':
self.drop_white_space = 0
if puredata and not self.pre:
                data = re.sub(r'\s+', ' ', data)
if data and data[0] == ' ':
self.space = 1
data = data[1:]
if not data and not force: return
if self.startpre:
#self.out(" :") #TODO: not output when already one there
if not data.startswith("\n"): # <pre>stuff...
data = "\n" + data
bq = (">" * self.blockquote)
if not (force and data and data[0] == ">") and self.blockquote: bq += " "
if self.pre:
if not self.list:
bq += " "
#else: list content is already partially indented
for i in xrange(len(self.list)):
bq += " "
data = data.replace("\n", "\n"+bq)
if self.startpre:
self.startpre = 0
if self.list:
data = data.lstrip("\n") # use existing initial indentation
if self.start:
self.space = 0
self.p_p = 0
self.start = 0
if force == 'end':
# It's the end.
self.p_p = 0
self.out("\n")
self.space = 0
if self.p_p:
self.out((self.br_toggle+'\n'+bq)*self.p_p)
self.space = 0
self.br_toggle = ''
if self.space:
if not self.lastWasNL: self.out(' ')
self.space = 0
if self.a and ((self.p_p == 2 and self.links_each_paragraph) or force == "end"):
if force == "end": self.out("\n")
newa = []
for link in self.a:
if self.outcount > link['outcount']:
self.out(" ["+ str(link['count']) +"]: " + urlparse.urljoin(self.baseurl, link['href']))
if has_key(link, 'title'): self.out(" ("+link['title']+")")
self.out("\n")
else:
newa.append(link)
if self.a != newa: self.out("\n") # Don't need an extra line when nothing was done.
self.a = newa
if self.abbr_list and force == "end":
for abbr, definition in self.abbr_list.items():
self.out(" *[" + abbr + "]: " + definition + "\n")
self.p_p = 0
self.out(data)
self.outcount += 1
def handle_data(self, data):
if r'\/script>' in data: self.quiet -= 1
if self.style:
self.style_def.update(dumb_css_parser(data))
if not self.maybe_automatic_link is None:
href = self.maybe_automatic_link
if href == data and self.absolute_url_matcher.match(href):
self.o("<" + data + ">")
return
else:
self.o("[")
self.maybe_automatic_link = None
if not self.code and not self.pre:
data = escape_md_section(data, snob=self.escape_snob)
self.o(data, 1)
def unknown_decl(self, data): pass
def charref(self, name):
if name[0] in ['x','X']:
c = int(name[1:], 16)
else:
c = int(name)
if not self.unicode_snob and c in unifiable_n.keys():
return unifiable_n[c]
else:
try:
return unichr(c)
except NameError: #Python3
return chr(c)
def entityref(self, c):
if not self.unicode_snob and c in unifiable.keys():
return unifiable[c]
else:
try: name2cp(c)
except KeyError: return "&" + c + ';'
else:
try:
return unichr(name2cp(c))
except NameError: #Python3
return chr(name2cp(c))
def replaceEntities(self, s):
s = s.group(1)
if s[0] == "#":
return self.charref(s[1:])
else: return self.entityref(s)
r_unescape = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape(self, s):
return self.r_unescape.sub(self.replaceEntities, s)
def google_nest_count(self, style):
"""calculate the nesting count of google doc lists"""
nest_count = 0
if 'margin-left' in style:
nest_count = int(style['margin-left'][:-2]) / self.google_list_indent
return nest_count
def optwrap(self, text):
"""Wrap all paragraphs in the provided text."""
if not self.body_width:
return text
assert wrap, "Requires Python 2.3."
result = ''
newlines = 0
for para in text.split("\n"):
if len(para) > 0:
if not skipwrap(para):
result += "\n".join(wrap(para, self.body_width))
if para.endswith(' '):
result += " \n"
newlines = 1
else:
result += "\n\n"
newlines = 2
else:
if not onlywhite(para):
result += para + "\n"
newlines = 1
else:
if newlines < 2:
result += "\n"
newlines += 1
return result
ordered_list_matcher = re.compile(r'\d+\.\s')
unordered_list_matcher = re.compile(r'[-\*\+]\s')
md_chars_matcher = re.compile(r"([\\\[\]\(\)])")
md_chars_matcher_all = re.compile(r"([`\*_{}\[\]\(\)#!])")
md_dot_matcher = re.compile(r"""
^ # start of line
(\s*\d+) # optional whitespace and a number
(\.) # dot
(?=\s) # lookahead assert whitespace
""", re.MULTILINE | re.VERBOSE)
md_plus_matcher = re.compile(r"""
^
(\s*)
(\+)
(?=\s)
""", flags=re.MULTILINE | re.VERBOSE)
md_dash_matcher = re.compile(r"""
^
(\s*)
(-)
(?=\s|\-) # followed by whitespace (bullet list, or spaced out hr)
# or another dash (header or hr)
""", flags=re.MULTILINE | re.VERBOSE)
slash_chars = r'\`*_{}[]()#+-.!'
md_backslash_matcher = re.compile(r'''
(\\) # match one slash
(?=[%s]) # followed by a char that requires escaping
''' % re.escape(slash_chars),
flags=re.VERBOSE)
def skipwrap(para):
# If the text begins with four spaces or one tab, it's a code block; don't wrap
    if para[0:4] == '    ' or para[0] == '\t':
return True
# If the text begins with only two "--", possibly preceded by whitespace, that's
# an emdash; so wrap.
stripped = para.lstrip()
if stripped[0:2] == "--" and len(stripped) > 2 and stripped[2] != "-":
return False
# I'm not sure what this is for; I thought it was to detect lists, but there's
# a <br>-inside-<span> case in one of the tests that also depends upon it.
if stripped[0:1] == '-' or stripped[0:1] == '*':
return True
# If the text begins with a single -, *, or +, followed by a space, or an integer,
    # followed by a ., followed by a space (in either case optionally preceded by
# whitespace), it's a list; don't wrap.
if ordered_list_matcher.match(stripped) or unordered_list_matcher.match(stripped):
return True
return False
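# Illustrative behaviour of skipwrap (examples added by the editor, not
# upstream): indented code blocks and list items are left unwrapped, while
# ordinary prose and em-dash lines are wrapped.
#
#   skipwrap('    indented code')     -> True
#   skipwrap('* bullet item')         -> True
#   skipwrap('1. ordered item')       -> True
#   skipwrap('-- an em-dash line')    -> False
#   skipwrap('plain paragraph text')  -> False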
def wrapwrite(text):
text = text.encode('utf-8')
try: #Python3
sys.stdout.buffer.write(text)
except AttributeError:
sys.stdout.write(text)
def html2text(html, baseurl=''):
h = HTML2Text(baseurl=baseurl)
return h.handle(html)
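# Minimal usage sketch (illustrative, not upstream code): convert a snippet of
# HTML to Markdown with the module-level helper. The exact whitespace in the
# output depends on BODY_WIDTH and the other module options.
#
#   md = html2text("<h1>Title</h1><p>Hello <b>world</b></p>")
#   # md contains something like "# Title" and "Hello **world**"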
def unescape(s, unicode_snob=False):
h = HTML2Text()
h.unicode_snob = unicode_snob
return h.unescape(s)
def escape_md(text):
"""Escapes markdown-sensitive characters within other markdown constructs."""
return md_chars_matcher.sub(r"\\\1", text)
def escape_md_section(text, snob=False):
"""Escapes markdown-sensitive characters across whole document sections."""
text = md_backslash_matcher.sub(r"\\\1", text)
if snob:
text = md_chars_matcher_all.sub(r"\\\1", text)
text = md_dot_matcher.sub(r"\1\\\2", text)
text = md_plus_matcher.sub(r"\1\\\2", text)
text = md_dash_matcher.sub(r"\1\\\2", text)
return text
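# Illustrative results of the two escaping helpers (added for clarity;
# backslashes shown literally):
#   escape_md("a [link] (here)")       -> a \[link\] \(here\)
#   escape_md_section("1. not a list") -> 1\. not a list
# Passing snob=True additionally escapes every Markdown metacharacter.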
def main():
baseurl = ''
p = optparse.OptionParser('%prog [(filename|url) [encoding]]',
version='%prog ' + __version__)
p.add_option("--ignore-emphasis", dest="ignore_emphasis", action="store_true",
default=IGNORE_EMPHASIS, help="don't include any formatting for emphasis")
p.add_option("--ignore-links", dest="ignore_links", action="store_true",
default=IGNORE_ANCHORS, help="don't include any formatting for links")
p.add_option("--ignore-images", dest="ignore_images", action="store_true",
default=IGNORE_IMAGES, help="don't include any formatting for images")
p.add_option("-g", "--google-doc", action="store_true", dest="google_doc",
default=False, help="convert an html-exported Google Document")
p.add_option("-d", "--dash-unordered-list", action="store_true", dest="ul_style_dash",
default=False, help="use a dash rather than a star for unordered list items")
p.add_option("-e", "--asterisk-emphasis", action="store_true", dest="em_style_asterisk",
default=False, help="use an asterisk rather than an underscore for emphasized text")
p.add_option("-b", "--body-width", dest="body_width", action="store", type="int",
default=BODY_WIDTH, help="number of characters per output line, 0 for no wrap")
p.add_option("-i", "--google-list-indent", dest="list_indent", action="store", type="int",
default=GOOGLE_LIST_INDENT, help="number of pixels Google indents nested lists")
p.add_option("-s", "--hide-strikethrough", action="store_true", dest="hide_strikethrough",
default=False, help="hide strike-through text. only relevant when -g is specified as well")
p.add_option("--escape-all", action="store_true", dest="escape_snob",
default=False, help="Escape all special characters. Output is less readable, but avoids corner case formatting issues.")
(options, args) = p.parse_args()
# process input
encoding = "utf-8"
if len(args) > 0:
file_ = args[0]
if len(args) == 2:
encoding = args[1]
if len(args) > 2:
p.error('Too many arguments')
if file_.startswith('http://') or file_.startswith('https://'):
baseurl = file_
j = urllib.urlopen(baseurl)
data = j.read()
if encoding is None:
try:
from feedparser import _getCharacterEncoding as enc
except ImportError:
enc = lambda x, y: ('utf-8', 1)
encoding = enc(j.headers, data)[0]
if encoding == 'us-ascii':
encoding = 'utf-8'
else:
data = open(file_, 'rb').read()
if encoding is None:
try:
from chardet import detect
except ImportError:
detect = lambda x: {'encoding': 'utf-8'}
encoding = detect(data)['encoding']
else:
data = sys.stdin.read()
data = data.decode(encoding)
h = HTML2Text(baseurl=baseurl)
# handle options
if options.ul_style_dash: h.ul_item_mark = '-'
if options.em_style_asterisk:
h.emphasis_mark = '*'
h.strong_mark = '__'
h.body_width = options.body_width
h.list_indent = options.list_indent
h.ignore_emphasis = options.ignore_emphasis
h.ignore_links = options.ignore_links
h.ignore_images = options.ignore_images
h.google_doc = options.google_doc
h.hide_strikethrough = options.hide_strikethrough
h.escape_snob = options.escape_snob
wrapwrite(h.handle(data))
if __name__ == "__main__":
main()
| 35.096175 | 129 | 0.53128 | [
"Apache-2.0"
] | awenz-uw/arlo | dev/html2text.py | 32,113 | Python |
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="pyk3x",
author="Roming22",
author_email="[email protected]",
description="API to simplify k3d deployments",
keywords="kuberbetes, k3s, k3d, k3x, cluster",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Roming22/pyk3x",
project_urls={
"Documentation": "https://github.com/Roming22/pyk3x",
"Bug Reports": "https://github.com/Roming22/pyk3x/issues",
"Source Code": "https://github.com/Roming22/pyk3x",
# 'Funding': '',
# 'Say Thanks!': '',
},
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
classifiers=[
# see https://pypi.org/classifiers/
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.7",
)
| 35.410256 | 66 | 0.6126 | [
"MIT"
] | Roming22/pyk3x | setup.py | 1,381 | Python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import os.path
import math
import tensorflow as tf
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error
LOGDIR = "/tmp/cnn_backbone_angles/"
# Parameters
batch_size = 5
training_epochs = 10
display_step = 1
internal_channels_1 = 100
internal_channels_2 = 100
internal_channels_3 = 100
internal_channels_4 = 50
window_size = 11
beta = 0.001
values_to_predict = 2
num_splits = 10
alpha = 0.2
dropout_keep_rate = 0.5
learning_rate = 1E-3
keep_prob = tf.placeholder_with_default(1.0, shape=(), name="keep_prob")
keep_prob_input = tf.placeholder_with_default(1.0, shape=(), name="keep_prob_input")
def fc_layer(input, size_in, size_out, name="fc"):
with tf.name_scope(name):
w = tf.Variable(tf.truncated_normal([window_size, size_in, size_out], stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[size_out]), name="B")
act = conv1d(input, w) + b
tf.summary.histogram("weights", w)
tf.summary.histogram("biases", b)
tf.summary.histogram("activations", act)
return act, w
def convnn(x, channels_num, layers_num, window_size = 11):
W_arr = []
layers = []
# First convolutional layer
input_dimensions = x.get_shape().as_list()[1:]
filter_shape = [window_size, input_dimensions[-1], channels_num]
W_input = weight_variable(filter_shape)
W_arr.append(W_input)
b_input = bias_variable([input_dimensions[0], channels_num])
input_layer = tf.nn.relu(conv1d(x, W_input) + b_input)
dropout_input = tf.nn.dropout(input_layer, keep_prob_input)
layers.append(dropout_input)
# Hidden layers
filter_shape = [window_size, channels_num, channels_num]
W_hidden = tf.constant([], dtype=tf.float32)
for i in range(layers_num):
with tf.name_scope("conv"):
W_hidden = weight_variable(filter_shape)
W_arr.append(W_hidden)
b_hidden = bias_variable([input_dimensions[0], channels_num])
conv_layer = tf.nn.tanh(alpha*conv1d(layers[i], W_hidden) + b_hidden)
tf.summary.histogram("weights", W_hidden)
tf.summary.histogram("biases", b_hidden)
tf.summary.histogram("activations", conv_layer)
with tf.name_scope("dropout"):
dropout = tf.nn.dropout(conv_layer, keep_prob)
layers.append(dropout)
# Output convolutional layer
layer_out, W_out = fc_layer(layers[-1], channels_num, values_to_predict)
W_arr.append(W_out)
# layer_out = tf.atan2(tf.sin(layer_out), tf.cos(layer_out))
# Loss function with L2 Regularization with beta=0.001
regularizers = tf.nn.l2_loss(W_input) + tf.nn.l2_loss(W_hidden) * layers_num + tf.nn.l2_loss(W_out)
# regularizers = tf.constant(0, dtype=tf.float32)
# for W in W_arr:
# regularizers += tf.nn.l2_loss(W)
return layer_out, regularizers
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name="W")
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name="B")
def conv1d(x, W):
"""conv1d returns a 1d convolution layer."""
return tf.nn.conv1d(x, W, 1, 'SAME')
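# Shape sketch (illustrative): with x of shape [batch, 700, in_channels] and a
# filter W of shape [window_size, in_channels, out_channels], stride 1 and
# 'SAME' padding keep the sequence length, so conv1d(x, W) has shape
# [batch, 700, out_channels].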
def avgpool2d(x, k=2):
# MaxPool2D wrapper
return tf.nn.avg_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
def calculate_accuracy(predictions, labels):
num_proteins = predictions.shape[0]
protein_accuracy = np.zeros(num_proteins, dtype=np.float32)
label_accuracy = {1: {"total": 0, "correct": 0}, 2: {"total": 0, "correct": 0},
3: {"total": 0, "correct": 0}}
for i in range(num_proteins):
total_predictions = 0
correct_predictions = 0
for j in range(predictions.shape[1]):
phi = math.degrees(labels[i][j][0])
phi0 = math.degrees(predictions[i][j][0])
psi = math.degrees(labels[i][j][1])
psi0 = math.degrees(predictions[i][j][1])
if (phi != 0) or (psi != 0):
total_predictions += 1
expected_state = get_backbone_distribution(labels[i][j])
predicted_state = get_backbone_distribution(predictions[i][j])
label_accuracy[predicted_state]["total"] += 1
if (predicted_state == expected_state):
# correct_predictions += 1
label_accuracy[predicted_state]["correct"] += 1
# print("REAL PHI->>>>>"+str(labels[i][j][0]))
# print("PREDICTED PHI->>>>>" + str(predictions[i][j][0]))
diff = math.sqrt(math.pow(phi - phi0, 2)+math.pow(psi - psi0, 2))
                diff_phi = phi - phi0
diff_psi = psi - psi0
criteria_1 = (np.abs(diff_phi) < 60) & (np.abs(diff_psi) < 60)
criteria_2 = (np.abs(diff_phi+diff_psi) < 60) & (np.abs(diff_psi) < 90) & (np.abs(diff_phi) < 90)
if (diff < 60):
correct_predictions += 1
# print("CORRECT->>>>>"+str(correct_predictions))
# print("TOTAL->>>>>" + str(total_predictions))
if (total_predictions > 0):
protein_accuracy[i] = correct_predictions / float(total_predictions)
accuracy_dist = {}
total = 0
correct = 0
for label, val in label_accuracy.iteritems():
if (val["total"] > 0):
accuracy_dist[label] = val["correct"]/val["total"]
total += val["total"]
correct += val["correct"]
if (total > 0):
accuracy_dist["total"] = correct/total
return protein_accuracy, accuracy_dist
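# Worked example of the correctness criterion above (illustrative): a residue
# predicted at (phi0, psi0) = (-60, -45) degrees against a true (-90, -20)
# gives sqrt(30^2 + 25^2) ~= 39.1 < 60, so it counts as correct; a deviation of
# (50, 40) gives sqrt(50^2 + 40^2) ~= 64.0 and is rejected.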
def get_backbone_distribution(angles):
phi = math.degrees(angles[0])
psi = math.degrees(angles[1])
# A: -160 < phi <0 and -70 < psi < 60
if (-160 < phi < 0) & (-70 < psi < 60):
return 1
# P: 0 < phi < 160 and -60 < psi < 95
elif (0 < phi < 160) & (-60 < psi < 95):
return 2
else:
return 3
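# Quick illustration (added, not part of the training code): a residue whose
# (phi, psi) converts to (-75, -45) degrees falls in region 1 (roughly the
# alpha/A basin), (60, 40) in region 2 (P), and (-150, 150) in region 3
# (everything else).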
def plot_ramachandran(predictions, title):
phi_angles = predictions[:][:][0].flatten()
phi_angles = list(map(lambda x: math.degrees(x), phi_angles))
psi_angles = predictions[:][:][1].flatten()
psi_angles = list(map(lambda x: math.degrees(x), psi_angles))
colors = np.random.rand(len(psi_angles))
fig = plt.figure()
plt.xlim([-180, 180])
plt.ylim([-180, 180])
plt.title(title)
plt.xlabel('phi')
plt.ylabel('psi')
plt.grid()
plt.scatter(phi_angles, psi_angles, alpha=0.5, c=colors)
fig.savefig("./plots/" + title + ".png", bbox_inches='tight')
# plt.show()
# fig.savefig("./plots/" + title + ".png", bbox_inches='tight')
plt.close()
def plot_loss(loss_arr):
l = plt.figure()
plt.plot(loss_arr)
plt.title('Loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(plot_legend, loc='upper left')
l.show()
def make_hparam_string(layers_num, channels_num, test_session):
return "nl_%s,nc_%s, session%s" % (layers_num, channels_num, test_session)
def convert_to_degrees(arr):
"""Covert all phi and psi angles to degrees"""
arr[0] = math.degrees(arr[0])
arr[1] = math.degrees(arr[1])
return arr
data = np.load('phipsi_features.npz')['features']
all_data = data.reshape(data.shape[0],700,69)
# all_data = all_data[0:300]
all_sets = all_data[:,:,0:21]
all_sets = np.concatenate([all_sets, all_data[:,:,21:42]], axis=-1)
all_sets = np.concatenate([all_sets, all_data[:,:,42:63]], axis=-1)
# all_labels = all_data[:,:,63:67]
all_angles = all_data[:,:,67:69]
where_are_NaNs = np.isnan(all_angles)
all_angles[where_are_NaNs] = 0.0
k_fold = KFold(n_splits=num_splits)
layers_channels = [(6, 100), (7, 100)]
# Build the convolutional network
for layers_num, channels_num in layers_channels:
for use_l2 in [False, True]:
for use_early_stopping in [True, False]:
crossvalidation_train_accuracy = 0
crossvalidation_test_accuracy = 0
crossvalidation_accuracy_distr = {'total': 0, 1: 0, 2: 0, 3: 0}
crossvalidation_test_mae = 0
executed_epochs = 0
train_session = 0
test_session = 0
learning_rate_type = 1
for train_index, test_index in k_fold.split(all_sets):
train_set, test_set = all_sets[train_index], all_sets[test_index]
train_labels, test_labels = all_angles[train_index], all_angles[test_index]
train_size = train_set.shape[0]
train_y = train_labels
test_y = test_labels
test_session += 1
# Create the model
x = tf.placeholder(tf.float32, [None, 700, train_set[0].shape[-1]], name="x")
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 700, values_to_predict], name="labels")
y_nn, regularizers = convnn(x, channels_num, layers_num, window_size)
prediction = y_nn
with tf.name_scope("loss"):
deviations = tf.subtract(prediction, y_)
ae = tf.abs(deviations)
mae = tf.reduce_mean(ae)
atan2 = tf.atan2(tf.sin(deviations), tf.cos(deviations))
loss = tf.square(atan2, name="loss")
mean_loss = tf.reduce_mean(loss)
loss_summary = tf.summary.scalar("loss", mean_loss)
with tf.name_scope("loss2"):
# print(tf.shape(prediction))
# print(tf.shape(y_))
phi = prediction[:, :, 0]
phi0 = y_[:, :, 0]
psi = prediction[:, :, 1]
psi0 = y_[:,:, 1]
# cos_phi_diff = tf.square(tf.subtract(tf.cos(phi), tf.cos(phi0)))
# sin_phi_diff = tf.square(tf.subtract(tf.sin(phi), tf.sin(phi0)))
# cos_psi_diff = tf.square(tf.subtract(tf.cos(psi), tf.cos(psi0)))
# sin_psi_diff = tf.square(tf.subtract(tf.sin(psi), tf.sin(psi0)))
# phi_squared_sum = tf.add(cos_phi_diff, sin_phi_diff)
# psi_squared_sum = tf.add(cos_psi_diff, sin_psi_diff)
phi_diff = tf.reduce_sum(tf.squared_difference(phi, phi0))/2
psi_diff = tf.reduce_sum(tf.squared_difference(psi, psi0))/2
loss2 = tf.add(phi_diff, psi_diff)
with tf.name_scope("mse"):
mse = tf.squared_difference(prediction, y_)
mse_summary = tf.summary.scalar("mse", mse)
with tf.name_scope("l2_loss"):
l2_loss = beta * regularizers
if (use_l2):
loss = loss + l2_loss
loss = tf.reduce_mean(loss)
l2_summary = tf.summary.scalar("l2_loss", l2_loss)
with tf.name_scope("train"):
# Use Adam optimizer
optimization = tf.train.AdamOptimizer(learning_rate).minimize(loss)
# with tf.name_scope("accuracy"):
# correct_prediction = tf.equal(prediction, y)
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# tf.summary.scalar("accuracy", accuracy)
summ = tf.summary.merge_all()
print("Window size: " + str(window_size))
print("Layers: " + str(layers_num))
print("Channels: " + str(channels_num))
print("Beta: " + str(beta))
print("Use L2: " + str(use_l2))
print("Use Early stopping: " + str(use_early_stopping))
sess = tf.InteractiveSession()
init = tf.global_variables_initializer()
sess.run(init)
saver = tf.train.Saver()
min_delta = 0.01
plot_legend = []
previous_epoch_min = 100
min_validation_loss = 100
for epoch in range(training_epochs):
train_session += 1
loss_arr = []
previous_batch_loss = 0.0
patience = 6
patience_cnt = 0
hparam = make_hparam_string(layers_num, channels_num, train_session)
writer = tf.summary.FileWriter(LOGDIR + hparam)
writer.add_graph(sess.graph)
total_batches = int(train_size/batch_size)
# Loop over all batches
for i in range(total_batches):
start_index = i * batch_size
stop_index = (i+1) * batch_size
batch_x = train_set[start_index:stop_index]
batch_y = train_y[start_index:stop_index]
# Run optimization op
# backprop and cost op (to get loss value)
if i % 5 == 0:
batch_predictions, l_summ, batch_loss = sess.run([prediction, loss_summary, loss], feed_dict={x: batch_x, y_: batch_y, keep_prob: dropout_keep_rate, keep_prob_input: 0.8})
writer.add_summary(l_summ, i+1)
loss_arr.append(batch_loss)
saver.save(sess, os.path.join(LOGDIR, "model.ckpt"), i)
# batch_predictions = np.apply_along_axis(convert_to_degrees, 2, batch_predictions)
batch_accuracy, batch_distr = calculate_accuracy(batch_predictions, batch_y)
# print('step %d, training accuracy %g' % (i, np.average(batch_accuracy)))
# early stopping
if(use_early_stopping):
if (epoch > 2 and i > total_batches / 2 and batch_loss < previous_epoch_min):
previous_epoch_min = min(loss_arr)
print("Early stopping!!")
break
optimization.run(feed_dict={x: batch_x, y_: batch_y})
previous_epoch_min = min(loss_arr)
# Display logs per epoch step
if epoch % display_step == 0:
predictions, train_loss = sess.run([prediction,loss], feed_dict={x: train_set, y_: train_y, keep_prob: dropout_keep_rate, keep_prob_input: 0.8})
# predictions = np.apply_along_axis(convert_to_degrees, 2, predictions)
# plot_ramachandran(train_y, "Real values_"+str(epoch))
# raw_input()
train_accuracy, train_acc_distr = calculate_accuracy(predictions, train_y)
train_accuracy = np.average(train_accuracy)
crossvalidation_train_accuracy += train_accuracy
plot_legend.append('train_' + str(epoch))
# plot_loss(loss_arr)
# print("Training accuracy: ", \
# "{:.6f}".format(train_accuracy))
if (epoch > training_epochs / 2):
valid_predictions, valid_loss, valid_mae = sess.run([prediction, loss, mae], feed_dict={x: test_set, y_: test_y})
# valid_predictions = np.apply_along_axis(convert_to_degrees, 2, valid_predictions)
valid_accuracy, valid_acc_distr = calculate_accuracy(valid_predictions, test_y)
valid_accuracy = np.average(valid_accuracy)
if (epoch >= training_epochs - 1):
if (valid_loss < min_validation_loss):
training_epochs += 1
print("INCREASING EPOCHS")
else:
crossvalidation_test_accuracy += valid_accuracy
crossvalidation_test_mae += valid_mae
for label in valid_acc_distr:
crossvalidation_accuracy_distr[label] += valid_acc_distr[label]
print(crossvalidation_accuracy_distr)
if (epoch >= training_epochs - 2):
min_validation_loss = valid_loss
print(valid_acc_distr)
print("Validation accuracy: ", \
"{:.6f}".format(valid_accuracy))
executed_epochs += 1
# Test trained model
test_predictions, test_summ, test_mae = sess.run([prediction, loss_summary, mae], feed_dict={x: test_set, y_: test_y})
writer.add_summary(test_summ, i + 1)
test_accuracy, test_acc_distr = calculate_accuracy(test_predictions, test_y)
plot_ramachandran(test_predictions, "Predictions Fold "+str(test_session))
plot_ramachandran(test_y, "Real values Fold "+str(test_session))
# plot_legend.append('validation')
print(test_acc_distr)
# test_accuracy = np.average(test_accuracy)
# crossvalidation_test_accuracy += test_accuracy
# crossvalidation_test_mae += test_mae
# print("Testing accuracy: ", \
# "{:.6f}".format(test_accuracy))
for label in crossvalidation_accuracy_distr:
crossvalidation_accuracy_distr[label] /= num_splits
print(crossvalidation_accuracy_distr)
# print("Final Testing DISTR: ", \
# "{:.6f}".format(crossvalidation_test_mae / num_splits))
print("Final Testing MAE: ", \
"{:.6f}".format(crossvalidation_test_mae / num_splits))
# print("Final Training accuracy: ", \
# "{:.6f}".format(crossvalidation_train_accuracy / (num_splits*training_epochs)))
print("Final Test accuracy: ", \
"{:.6f}".format(crossvalidation_test_accuracy / num_splits))
print('Run `tensorboard --logdir=%s` to see the results.' % LOGDIR)
# valid_predictions = sess.run(tf.argmax(prediction, 2), feed_dict={x: valid_x, y_: valid_y})
# valid_labels = np.argmax(valid_y, 2)
# valid_accuracy = calculate_accuracy(valid_predictions, valid_labels)
# print("Validation accuracy: ", \
# "{:.6f}".format(valid_accuracy)) | 46.062954 | 199 | 0.568545 | [
"MIT"
] | Graveheart/ProteinSSPrediction | cnn_phi_psi.py | 19,024 | Python |
"""desafio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('vagas.urls')),
]
| 33.166667 | 77 | 0.701005 | [
"MIT"
] | NathanMilhomen/vagas | desafio/urls.py | 796 | Python |
from django.urls import path
from . import views
urlpatterns = [
path('', views.IndexView.as_view()),
path('non-release/', views.NonRelease.as_view()),
]
| 20.375 | 53 | 0.680982 | [
"MIT"
] | chen1i6c04/Benga | frontend/urls.py | 163 | Python |
from punc_tokenizer import PuncTokenizer
| 20.5 | 40 | 0.902439 | [
"MIT"
] | liuxiaoan8008/sklearn-plus | sklearn_plus/preprocessing/text/en/__init__.py | 41 | Python |
import os
import random
import threading
from time import sleep
from unittest import TestCase
import asn1tools
import wx
import asn1editor
from asn1editor.wxPython.ViewSelect import ViewType
from tests import testHelper
def actions(main_window: asn1editor.wxPython.MainWindow):
def get_children(window: wx.Window):
my_children = window.GetChildren()
if my_children is not None:
their_children = []
for my_child in my_children:
their_children += get_children(my_child)
return list(my_children) + their_children
else:
return []
sleep(1)
key_codes = [wx.WXK_TAB, wx.WXK_DOWN, wx.WXK_UP, wx.WXK_LEFT, wx.WXK_RIGHT, wx.WXK_SPACE] + [c for c in range(ord('1'), ord('9'))]
ui_sim = wx.UIActionSimulator()
for _ in range(1000):
main_window.SetFocus()
key_code = random.choice(key_codes)
ui_sim.KeyDown(key_code)
ui_sim.KeyUp(key_code)
try:
main_window.save_data_to_file('test.json')
except asn1tools.ConstraintsError:
pass
main_window.Close(True)
wx.GetApp().ExitMainLoop()
class MonkeyTest(TestCase):
@staticmethod
def test_monkey():
if os.getenv('TRAVIS') is not None or os.getenv('GITHUB_ACTIONS') is not None:
return
# noinspection PyUnusedLocal
app = testHelper.get_wx_app()
main_window = asn1editor.wxPython.MainWindow()
main_window.select_view(ViewType.GROUPS)
test_types = [('example/example.asn', 'EXAMPLE.Sequence')]
for spec, type_ in test_types:
main_window.load_spec(spec, type_)
action_thread = threading.Thread(target=actions, args=[main_window])
action_thread.start()
main_window.Show()
app.MainLoop()
action_thread.join(timeout=0.0)
| 28.846154 | 134 | 0.654933 | [
"MIT"
] | Groops78/asn1editor | tests/test_MonkeyTest.py | 1,875 | Python |
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.33.0"
class SDL2Conan(ConanFile):
# TODO: When porting to CCI rename this package to SDL (without 2)
name = "sdl2"
description = "Access to audio, keyboard, mouse, joystick, and graphics hardware via OpenGL, Direct3D and Vulkan"
topics = ("sdl2", "audio", "keyboard", "graphics", "opengl")
url = "https://github.com/bincrafters/conan-sdl2"
homepage = "https://www.libsdl.org"
license = "Zlib"
exports_sources = ["CMakeLists.txt", "patches/*"]
generators = ["cmake", "pkg_config"]
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"directx": [True, False],
"alsa": [True, False],
"jack": [True, False],
"pulse": [True, False],
"sndio": [True, False],
"nas": [True, False],
"esd": [True, False],
"arts": [True, False],
"x11": [True, False],
"xcursor": [True, False],
"xinerama": [True, False],
"xinput": [True, False],
"xrandr": [True, False],
"xscrnsaver": [True, False],
"xshape": [True, False],
"xvm": [True, False],
"wayland": [True, False],
"directfb": [True, False],
"iconv": [True, False],
"video_rpi": [True, False],
"sdl2main": [True, False],
"opengl": [True, False],
"opengles": [True, False],
"vulkan": [True, False],
"libunwind": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"directx": True,
"alsa": True,
"jack": True,
"pulse": True,
"sndio": False,
"nas": True,
"esd": False,
"arts": False,
"x11": True,
"xcursor": True,
"xinerama": True,
"xinput": True,
"xrandr": True,
"xscrnsaver": True,
"xshape": True,
"xvm": True,
"wayland": False,
"directfb": False,
"iconv": True,
"video_rpi": False,
"sdl2main": True,
"opengl": True,
"opengles": True,
"vulkan": True,
"libunwind": True,
}
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
_cmake = None
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
if self.settings.os != "Linux":
del self.options.alsa
del self.options.jack
del self.options.pulse
del self.options.sndio
del self.options.nas
del self.options.esd
del self.options.arts
del self.options.x11
del self.options.xcursor
del self.options.xinerama
del self.options.xinput
del self.options.xrandr
del self.options.xscrnsaver
del self.options.xshape
del self.options.xvm
del self.options.wayland
del self.options.directfb
del self.options.video_rpi
del self.options.libunwind
if self.settings.os != "Windows":
del self.options.directx
def configure(self):
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
if self.settings.os == "Macos" and not self.options.iconv:
raise ConanInvalidConfiguration("On macOS iconv can't be disabled")
def requirements(self):
if self.options.iconv:
self.requires("libiconv/1.16")
if self.settings.os == "Linux":
self.requires("xorg/system")
if self.options.alsa:
self.requires("libalsa/1.2.4")
if self.options.pulse:
self.requires("pulseaudio/13.0")
if self.options.opengl:
self.requires("opengl/system")
if self.options.get_safe("libunwind", False):
self.requires("libunwind/1.5.0")
def package_id(self):
del self.info.options.sdl2main
def build_requirements(self):
if self.settings.os == "Linux":
self.build_requires("pkgconf/1.7.3")
def system_requirements(self):
if self.settings.os == "Linux" and tools.os_info.is_linux:
if tools.os_info.with_apt or tools.os_info.with_yum:
installer = tools.SystemPackageTool()
packages = []
packages_apt = []
packages_yum = []
packages_apt.append("libgbm-dev")
packages_yum.append("mesa-libgbm-devel")
if self.options.jack:
packages_apt.append("libjack-dev")
packages_yum.append("jack-audio-connection-kit-devel")
if self.options.sndio:
packages_apt.append("libsndio-dev")
if self.options.nas:
packages_apt.append("libaudio-dev")
packages_yum.append("nas-devel")
if self.options.esd:
packages_apt.append("libesd0-dev")
packages_yum.append("esound-devel")
if self.options.arts:
packages_apt.append("artsc0-dev")
if self.options.wayland:
packages_apt.extend(["libwayland-dev",
"wayland-protocols"])
packages_yum.extend(["wayland-devel",
"wayland-protocols-devel"])
if self.options.directfb:
packages_apt.append("libdirectfb-dev")
if tools.os_info.with_apt:
packages = packages_apt
elif tools.os_info.with_yum:
packages = packages_yum
for package in packages:
installer.install(package)
def source(self):
tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
if tools.Version(self.version) >= "2.0.14":
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
'check_library_exists(c iconv_open "" HAVE_BUILTIN_ICONV)',
'# check_library_exists(c iconv_open "" HAVE_BUILTIN_ICONV)')
self._build_cmake()
def _check_pkg_config(self, option, package_name):
if option:
pkg_config = tools.PkgConfig(package_name)
if not pkg_config.provides:
raise ConanInvalidConfiguration("package %s is not available" % package_name)
def _check_dependencies(self):
if self.settings.os == "Linux":
self._check_pkg_config(self.options.jack, "jack")
self._check_pkg_config(self.options.esd, "esound")
self._check_pkg_config(self.options.wayland, "wayland-client")
self._check_pkg_config(self.options.wayland, "wayland-protocols")
self._check_pkg_config(self.options.directfb, "directfb")
def _configure_cmake(self):
if not self._cmake:
self._check_dependencies()
self._cmake = CMake(self)
            # FIXME: self.install_folder not defined? Necessary?
self._cmake.definitions["CONAN_INSTALL_FOLDER"] = self.install_folder
if self.settings.os != "Windows":
if not self.options.shared:
self._cmake.definitions["SDL_STATIC_PIC"] = self.options.fPIC
if self.settings.compiler == "Visual Studio" and not self.options.shared:
self._cmake.definitions["HAVE_LIBC"] = True
self._cmake.definitions["SDL_SHARED"] = self.options.shared
self._cmake.definitions["SDL_STATIC"] = not self.options.shared
self._cmake.definitions["VIDEO_OPENGL"] = self.options.opengl
self._cmake.definitions["VIDEO_OPENGLES"] = self.options.opengles
self._cmake.definitions["VIDEO_VULKAN"] = self.options.vulkan
if self.settings.os == "Linux":
# See https://github.com/bincrafters/community/issues/696
self._cmake.definitions["SDL_VIDEO_DRIVER_X11_SUPPORTS_GENERIC_EVENTS"] = 1
self._cmake.definitions["ALSA"] = self.options.alsa
if self.options.alsa:
self._cmake.definitions["HAVE_ASOUNDLIB_H"] = True
self._cmake.definitions["HAVE_LIBASOUND"] = True
self._cmake.definitions["JACK"] = self.options.jack
self._cmake.definitions["PULSEAUDIO"] = self.options.pulse
self._cmake.definitions["SNDIO"] = self.options.sndio
self._cmake.definitions["NAS"] = self.options.nas
self._cmake.definitions["VIDEO_X11"] = self.options.x11
if self.options.x11:
self._cmake.definitions["HAVE_XEXT_H"] = True
self._cmake.definitions["VIDEO_X11_XCURSOR"] = self.options.xcursor
if self.options.xcursor:
self._cmake.definitions["HAVE_XCURSOR_H"] = True
self._cmake.definitions["VIDEO_X11_XINERAMA"] = self.options.xinerama
if self.options.xinerama:
self._cmake.definitions["HAVE_XINERAMA_H"] = True
self._cmake.definitions["VIDEO_X11_XINPUT"] = self.options.xinput
if self.options.xinput:
self._cmake.definitions["HAVE_XINPUT_H"] = True
self._cmake.definitions["VIDEO_X11_XRANDR"] = self.options.xrandr
if self.options.xrandr:
self._cmake.definitions["HAVE_XRANDR_H"] = True
self._cmake.definitions["VIDEO_X11_XSCRNSAVER"] = self.options.xscrnsaver
if self.options.xscrnsaver:
self._cmake.definitions["HAVE_XSS_H"] = True
self._cmake.definitions["VIDEO_X11_XSHAPE"] = self.options.xshape
if self.options.xshape:
self._cmake.definitions["HAVE_XSHAPE_H"] = True
self._cmake.definitions["VIDEO_X11_XVM"] = self.options.xvm
if self.options.xvm:
self._cmake.definitions["HAVE_XF86VM_H"] = True
self._cmake.definitions["VIDEO_WAYLAND"] = self.options.wayland
self._cmake.definitions["VIDEO_DIRECTFB"] = self.options.directfb
self._cmake.definitions["VIDEO_RPI"] = self.options.video_rpi
elif self.settings.os == "Windows":
self._cmake.definitions["DIRECTX"] = self.options.directx
self._cmake.definitions["HAVE_LIBUNWIND_H"] = self.options.get_safe("libunwind")
self._cmake.configure(build_dir=self._build_subfolder)
return self._cmake
def _build_cmake(self):
if self.options.get_safe("pulse"):
tools.rename("libpulse.pc", "libpulse-simple.pc")
lib_paths = [lib for dep in self.deps_cpp_info.deps for lib in self.deps_cpp_info[dep].lib_paths]
with tools.environment_append({"LIBRARY_PATH": os.pathsep.join(lib_paths)}):
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy(pattern="COPYING.txt", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.remove_files_by_mask(os.path.join(self.package_folder, "bin"), "sdl2-config")
tools.rmdir(os.path.join(self.package_folder, "cmake"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "libdata"))
tools.rmdir(os.path.join(self.package_folder, "share"))
def _add_libraries_from_pc(self, library, static=None):
if static is None:
static = not self.options.shared
pkg_config = tools.PkgConfig(library, static=static)
libs = [lib[2:] for lib in pkg_config.libs_only_l] # cut -l prefix
lib_paths = [lib[2:] for lib in pkg_config.libs_only_L] # cut -L prefix
self.cpp_info.components["libsdl2"].system_libs.extend(libs)
self.cpp_info.components["libsdl2"].libdirs.extend(lib_paths)
self.cpp_info.components["libsdl2"].sharedlinkflags.extend(pkg_config.libs_only_other)
self.cpp_info.components["libsdl2"].exelinkflags.extend(pkg_config.libs_only_other)
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "SDL2"
self.cpp_info.names["cmake_find_package_multi"] = "SDL2"
postfix = "d" if self.settings.build_type == "Debug" else ""
# SDL2
sdl2_cmake_target = "SDL2" if self.options.shared else "SDL2-static"
self.cpp_info.components["libsdl2"].names["cmake_find_package"] = sdl2_cmake_target
self.cpp_info.components["libsdl2"].names["cmake_find_package_multi"] = sdl2_cmake_target
self.cpp_info.components["libsdl2"].includedirs.append(os.path.join("include", "SDL2"))
self.cpp_info.components["libsdl2"].libs = ["SDL2" + postfix]
if self.options.iconv:
self.cpp_info.components["libsdl2"].requires.append("libiconv::libiconv")
if self.settings.os == "Linux":
self.cpp_info.components["libsdl2"].system_libs = ["dl", "rt", "pthread"]
self.cpp_info.components["libsdl2"].requires.append("xorg::xorg")
if self.options.alsa:
self.cpp_info.components["libsdl2"].requires.append("libalsa::libalsa")
if self.options.pulse:
self.cpp_info.components["libsdl2"].requires.append("pulseaudio::pulseaudio")
if self.options.opengl:
self.cpp_info.components["libsdl2"].requires.append("opengl::opengl")
if self.options.jack:
self._add_libraries_from_pc("jack")
if self.options.sndio:
self._add_libraries_from_pc("sndio")
if self.options.nas:
self.cpp_info.components["libsdl2"].system_libs.append("audio")
if self.options.esd:
self._add_libraries_from_pc("esound")
if self.options.directfb:
self._add_libraries_from_pc("directfb")
if self.options.video_rpi:
self.cpp_info.components["libsdl2"].system_libs.append("bcm_host")
self.cpp_info.components["libsdl2"].includedirs.extend([
"/opt/vc/include",
"/opt/vc/include/interface/vcos/pthreads",
"/opt/vc/include/interface/vmcs_host/linux"
])
self.cpp_info.components["libsdl2"].libdirs.append("/opt/vc/lib")
self.cpp_info.components["libsdl2"].sharedlinkflags.append("-Wl,-rpath,/opt/vc/lib")
self.cpp_info.components["libsdl2"].exelinkflags.append("-Wl,-rpath,/opt/vc/lib")
elif self.settings.os == "Macos":
self.cpp_info.components["libsdl2"].frameworks = ["Cocoa", "Carbon", "IOKit", "CoreVideo", "CoreAudio", "AudioToolbox", "ForceFeedback"]
if tools.Version(self.version) >= "2.0.14":
self.cpp_info.components["libsdl2"].frameworks.append("Metal")
elif self.settings.os == "Windows":
self.cpp_info.components["libsdl2"].system_libs = ["user32", "gdi32", "winmm", "imm32", "ole32", "oleaut32", "version", "uuid", "advapi32", "setupapi", "shell32"]
if self.settings.compiler == "gcc":
self.cpp_info.components["libsdl2"].system_libs.append("mingw32")
if self.options.get_safe("libunwind"):
self.cpp_info.components["libsdl2"].requires.append("libunwind::libunwind")
# SDL2main
if self.options.sdl2main:
self.cpp_info.components["sdl2main"].names["cmake_find_package"] = "SDL2main"
self.cpp_info.components["sdl2main"].names["cmake_find_package_multi"] = "SDL2main"
self.cpp_info.components["sdl2main"].libs = ["SDL2main" + postfix]
self.cpp_info.components["sdl2main"].requires = ["libsdl2"]
| 46.505618 | 174 | 0.59181 | [
"MIT"
] | Rapatas/community | recipes/sdl2/all/conanfile.py | 16,556 | Python |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db.sqlalchemy import db
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.core.settings.models.base import JSONSettingsBase, PrincipalSettingsBase
from indico.util.decorators import strict_classproperty
from indico.util.string import return_ascii
class CoreSettingsMixin(object):
@strict_classproperty
@staticmethod
def __auto_table_args():
return (db.Index(None, 'module', 'name'),
{'schema': 'indico'})
class Setting(JSONSettingsBase, CoreSettingsMixin, db.Model):
@strict_classproperty
@staticmethod
def __auto_table_args():
return db.UniqueConstraint('module', 'name'),
@declared_attr
def __table_args__(cls):
return auto_table_args(cls)
@return_ascii
def __repr__(self):
return '<Setting({}, {}, {!r})>'.format(self.module, self.name, self.value)
class SettingPrincipal(PrincipalSettingsBase, CoreSettingsMixin, db.Model):
principal_backref_name = 'in_settings_acls'
@declared_attr
def __table_args__(cls):
return auto_table_args(cls)
@return_ascii
def __repr__(self):
return '<SettingPrincipal({}, {}, {!r})>'.format(self.module, self.name, self.principal)
| 29.826923 | 96 | 0.727917 | [
"MIT"
] | UNOG-Indico/UNOG-Indico-v2 | indico/core/settings/models/settings.py | 1,551 | Python |
import unittest
from collections import namedtuple
import m
sample = """\
ab
ac
b
b\
"""
TestCase = namedtuple("TestCase", ["text", "output"])
class TestDec6(unittest.TestCase):
def test_get_groups(self):
cases = [
TestCase(sample, [['ab', 'ac'], ['b', 'b']]),
]
for c in cases:
result = m.read_groups(c.text)
self.assertEqual(result, c.output, c)
def test_count_answers(self):
cases = [
TestCase(sample, [3, 1]),
]
for c in cases:
groups = m.read_groups(c.text)
nrs = []
for group in groups:
result = m.union_answers(group)
nrs.append(result)
self.assertEqual(nrs, c.output, c)
def test_count_intersection_answers(self):
cases = [
TestCase(sample, [1, 1]),
]
for c in cases:
groups = m.read_groups(c.text)
nrs = []
for group in groups:
result = m.intersection_answers(group)
nrs.append(result)
self.assertEqual(nrs, c.output, c)
if __name__ == '__main__':
unittest.main()
| 22.074074 | 57 | 0.519295 | [
"MIT"
] | einarssons/adventofcode2020 | dec06/test.py | 1,192 | Python |
decrescente = True
anterior = int(input("Enter the first number in the sequence: "))
valor = 1
while valor != 0 and decrescente:
    valor = int(input("Enter the next number in the sequence: "))
if valor > anterior:
decrescente = False
anterior = valor
if decrescente:
print("A sequência está em ordem decrescente! :-) ")
else:
print("A sequência não está em ordem decrescente! :-)") | 25.625 | 65 | 0.673171 | [
"MIT"
] | renarfreitas/Coursera | indicadordepassagem.py | 420 | Python |
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
train_data = np.load("E:\\quant_research\\train the rank of ten points\\RNN_point\\data\\train_data_10num.npy")
train_aim = np.load("E:\\quant_research\\train the rank of ten points\\RNN_point\\data\\train_label_10num.npy")
train_data = train_data.reshape(train_data.shape[0],10,1)
train_data = train_data.swapaxes(0, 1)
train_data = torch.from_numpy(train_data).type(torch.FloatTensor)
train_aim = torch.from_numpy(train_aim).type(torch.FloatTensor)
test_data = np.load("E:\\quant_research\\train the rank of ten points\\RNN_point\\data\\test_data_10num.npy")
test_aim = np.load("E:\\quant_research\\train the rank of ten points\\RNN_point\\data\\test_label_10num.npy")
test_data = test_data.reshape(test_data.shape[0],10,1)
test_data = test_data.swapaxes(0, 1)
test_data = torch.from_numpy(test_data).type(torch.FloatTensor)
test_aim = torch.from_numpy(test_aim).type(torch.FloatTensor)
class Encoder(nn.Module):
def __init__(self, input_size, hidden_size, batch_size, bidirectional=True):
super(Encoder, self).__init__()
self.hidden_size = hidden_size
self.input_size = input_size
self.batch_size = batch_size
self.bidirectional = bidirectional
self.lstm = nn.LSTM(input_size, hidden_size, batch_first=False, bidirectional=bidirectional)
def forward(self, inputs, hidden):
output, hidden = self.lstm(inputs, hidden)
return output, hidden
def init_hidden(self):
return (torch.zeros(1 + int(self.bidirectional), self.batch_size, self.hidden_size),
torch.zeros(1 + int(self.bidirectional), self.batch_size, self.hidden_size)) #(num_layers * num_directions, batch, hidden_size)
class AttentionDecoder(nn.Module):
def __init__(self, hidden_size, output_size, batch_size, vocab_size,seq_len):
super(AttentionDecoder, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.batch_size = batch_size
self.seq_len = seq_len
self.vocab_size = vocab_size
self.attn = nn.Linear(hidden_size + output_size + vocab_size, 1)
self.lstm = nn.LSTM(hidden_size + vocab_size, output_size)
self.final = nn.Linear(output_size, vocab_size)
def init_hidden(self):
return (torch.zeros(1, self.batch_size, self.output_size),
torch.zeros(1, self.batch_size, self.output_size))
def forward(self, decoder_hidden, encoder_outputs, input):
seq = 0
weights= []
i = 0
output = torch.zeros(self.batch_size, self.vocab_size)
for i in range(len(encoder_outputs)):
weights.append(self.attn(torch.cat((decoder_hidden[0][:].squeeze(0),encoder_outputs[i],output), dim=1)))
normalized_weight = F.softmax(torch.cat(weights, 1), 1)
normalized_weights = normalized_weight
attn_applied = torch.bmm(normalized_weight.unsqueeze(1),
encoder_outputs.transpose(0,1))
input_lstm = torch.cat((attn_applied.transpose(0,1)[0], output),
dim=1) # if we are using embedding, use embedding of input here instead
output_, hidden = self.lstm(input_lstm.unsqueeze(0), decoder_hidden)
            output = self.final(output_[0])  # output is (vocab_size, output_size)
#output = self.final2(output)
# hidden0 = hidden[0].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1)
# hidden1 = hidden[1].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1)
# decoder_hidden = (hidden0, hidden1)
# decoder_hiddens = decoder_hidden
out = F.softmax(output,1)
return out
seq_len = 10
input_size = 1
hidden_size = 2
batch_size = train_data.shape[1]
bidirectional = True
output_size = hidden_size * (1 + bidirectional)
vocal_size = 10
input = []
for i in range(10):
m = np.ones((10000,10))*i
input.append(m)
input = np.array(input)
input = torch.from_numpy(input).type(torch.FloatTensor)
class pointer_atten(nn.Module):
def __init__(self):
super(pointer_atten, self).__init__()
self.layer1 = Encoder(input_size = input_size,
hidden_size = hidden_size,
batch_size = batch_size,
bidirectional=True)
self.layer2 = AttentionDecoder(
hidden_size = hidden_size * (1 + bidirectional),
output_size = output_size,
batch_size = batch_size,
vocab_size = vocal_size,
seq_len = 1
)
def forward(self,x):
output, hidden = self.layer1.forward(x, self.layer1.init_hidden())
hidden0 = hidden[0].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1)
hidden1 = hidden[1].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1)
decoder_hidden = (hidden0, hidden1)
encoder_outputs = output
last_output = self.layer2.forward(decoder_hidden, output, input)
return last_output
Net = pointer_atten()
learning_rate = 0.05
Loss = nn.MSELoss(reduction='mean')
optimizer = torch.optim.Adam(Net.parameters(), lr=learning_rate)
###########################################
# train
###########################################
loss_list = []
True_list = []
num_epochs = 10000
epoch = 10000
batch = train_aim.detach().numpy().size
Net.load_state_dict(torch.load('E:\\quant_research\\train the rank of ten points\\RNN_point\\net_10num\\net720.pkl'))
for epoch in range(1000):
train_data = Variable(train_data,requires_grad=True)
train_aim = Variable(train_aim,requires_grad=True)
# Forward pass
outputs = Net(train_data)
loss = Loss(outputs, train_aim)
loss_list.append(loss)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch) % 10 == 0:
print ('Epoch [{}/{}], Loss: {:.4f}'
.format(epoch+1,num_epochs,loss.item()))
is_not = outputs.detach().numpy() - train_aim.detach().numpy()
is_not = np.where(is_not < -0.1, 10, is_not)
is_not = np.where(is_not < 0.1, 1, 0)
T_pre = np.nansum(is_not)
True_rate = T_pre / batch
True_list.append(True_rate)
print('accuracy of prediction in training data:', True_rate)
if epoch % 10 ==0:
torch.save(Net.state_dict(), 'E:\\quant_research\\train the rank of ten points\\\RNN_point\\net_10num\\net{}.pkl'.format(epoch))
loss_array = np.array(loss_list)
true_array = np.array(True_list)
np.save('E:\\quant_research\\train the rank of ten points\\\RNN_point\\loss',loss_array)
np.save('E:\\quant_research\\train the rank of ten points\\\RNN_point\\true',true_array)
loss_array = np.load('E:\\quant_research\\train the rank of ten points\\\RNN_point\\loss.npy',allow_pickle=True)
true_array = np.load('E:\\quant_research\\train the rank of ten points\\\RNN_point\\true.npy')
outputs = Net(train_data)
loss = Loss(outputs, train_aim)
label = np.argmax(outputs.detach().numpy(),axis = 1)
label_aim = np.argmax(train_aim.detach().numpy(),axis = 1)
True_rate = np.sum(label == label_aim) / 10000
print('loss in training data:%.5f,accuracy of prediction in training data:%.5f'%(loss,True_rate))
outputs = Net(test_data)
loss = Loss(outputs, test_aim)
label = np.argmax(outputs.detach().numpy(),axis = 1)
label_aim = np.argmax(test_aim.detach().numpy(),axis = 1)
True_rate = np.sum(label == label_aim) / 10000
print('loss in testing data:%.5f,accuracy of prediction in testing data:%.5f'%(loss,True_rate))
| 37.589623 | 146 | 0.648638 | [
"Apache-2.0"
] | 00wuweimin/jubilant-dollop | pointer_network.py | 7,979 | Python |
from projections import *
from urllib2 import urlopen
from httplib import HTTPConnection
from threading import Thread
from kivy.logger import Logger
from kivy.loader import Loader
from os.path import join, dirname
import time, os
import hashlib
GMLNS = "http://www.opengis.net/gml"
try:
from pyproj import Proj
from lxml.etree import ElementTree as ET
except:
# try:
from xml.etree import ElementTree as ET
# except:
# pass
class WFSOverlayServer(object):
cache = {}
available_maptype = dict(roadmap = 'Roadmap') # default
type = "wfs" # TODO: replace handling in mapviewer with action handlers in the overlay class
def __init__(self, progress_callback=None):
self.progress_callback = progress_callback
def setProgressCallback(self, progress_callback):
self.progress_callback = progress_callback
def load(self, url):
# read from internet
blocksize = 4096
self.progress_callback(0)
fd = urlopen(url)
idata = fd.read(blocksize)
loaded = blocksize
while True:
bdata = fd.read(blocksize)
if not bdata: break
loaded += blocksize
if self.progress_callback:
self.progress_callback(loaded)
idata += bdata
fd.close()
self.progress_callback(-1)
return idata
def findGeometry(self, elem):
geoms = elem.find("{%s}Point" % GMLNS)
if geoms is not None:
return geoms
geoms = elem.find("{%s}LinearRing" % GMLNS)
if geoms is not None:
return geoms
for c in elem.getchildren():
geom = self.findGeometry(c)
if geom is not None:
return geom
def findGeometries(self, members):
geoms = []
for m in members:
geom = self.findGeometry(m)
if geom is not None:
geoms.append(geom)
return geoms
def get(self, parent, width, height):
self.bl = parent.bottom_left
self.tr = parent.top_right
self.zoom = parent.zoom
url = self.geturl(self.bl[0], self.bl[1], self.tr[0], self.tr[1])
if not url:
return None
key = hashlib.md5(url).hexdigest()
if key in self.cache:
return self.cache[key]
try:
xml = self.load('http://' + self.provider_host + url)
tree = ET.fromstring(xml)
members = tree.findall("{%s}featureMember" % GMLNS)
self.geometries = self.findGeometries(members)
self.cache[key] = self.geometries
return self.geometries
except Exception,e:
Logger.error('OverlayServer could not find (or read) WFS from %s [%s]' % (url, e))
image = None
def getInfoText(self, member):
fields = member.getchildren()[0].getchildren()
info = ""
for field in fields:
if field.text is not None and field.text.strip() != "":
info += "%s: %s\n" % (field.tag[field.tag.index("}")+1:], field.text)
return info
def getInfo(self, lat, lon, epsilon):
try:
url = self.geturl(lat-epsilon, lon-epsilon, lat+epsilon, lon+epsilon)
except:
return None
try:
xml = self.load('http://' + self.provider_host + url)
tree = ET.fromstring(xml)
member = tree.find("{%s}featureMember" % GMLNS)
if member is not None:
infotext = self.getInfoText(member)
return infotext
except Exception,e:
Logger.error('OverlayServer could not find (or read) WFS from %s [%s]' % (url, e))
return None
def xy_to_co(self, lat, lon):
if self.customBounds:
x, y = latlon_to_custom(lat, lon, self.bounds)
elif self.isPLatLon: # patch for android - does not require pyproj library
x, y = lon, lat
elif self.isPGoogle: # patch for android - does not require pyproj library
x, y = latlon_to_google (lat, lon)
else:
x, y = transform(pLatlon, self.projection, lon, lat)
return x,y
def co_to_ll(self,x,y):
if self.customBounds:
l, m = custom_to_latlon(x, y, self.bounds)
elif self.isPLatLon: # patch for android - does not require pyproj library
l, m = y, x
elif self.isPGoogle: # patch for android - does not require pyproj library
l, m = google_to_latlon (y, x)
else:
l, m = transform(self.projection, pLatlon, y, x)
return l, m
def geturl(self, lat1, lon1, lat2, lon2):
try:
x1, y1 = self.xy_to_co(lat1, lon1)
x2, y2 = self.xy_to_co(lat2, lon2)
return self.url + "&bbox=%f,%f,%f,%f" % (x1, y1, x2, y2)
except RuntimeError, e:
return None
def parseFeature(self, feature, data):
try:
name = feature.find("Name").text
title = feature.find("Title").text
except:
name = None
title = None
srss = feature.findall("DefaultSRS")
if name:# and srss:
data[name] = map(lambda x:x.text, srss)
if self.debug:
print "Provider %s provides feature %s in projections %s" % (self.provider_host, name, data[name])
def initFromGetCapabilities(self, host, baseurl, feature = None, index = 0, srs = None):
self.debug = (feature == None) and (index == 0)
# GetCapabilities (Features + SRS)
capabilities = urlopen(host + baseurl + "?SERVICE=WFS&Request=GetCapabilities").read().strip()
try:
tree = ET.fromstring(capabilities)
if self.debug:
ET.dump(tree)
features = tree.findall("FeatureType") #TODO: proper parsing of cascading layers and their SRS
data = {}
for f in features:
self.parseFeature(f, data)
# Choose Feature and SRS by (alphabetical) index
if feature is None:
feature = sorted(data.keys())[index]
if srs is None:
srs = sorted(data[feature])[0]
except:
pass
print "Displaying from %s/%s: feature %s in SRS %s." % (host, baseurl, feature, srs)
# generate tile URL and init projection by EPSG code
self.feature = feature
self.url = baseurl + "?typeName=namespace:%s&SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&maxFeatures=50" % (feature)
self.isPGoogle = False
self.isPLatLon = False
if srs=="EPSG:4326":
self.isPLatLon = True
elif srs=="EPSG:900913" or srs == "EPSG:3857":
self.isPGoogle = True
try:
self.projection = pGoogle
except:
pass
else:
try:
self.projection = Proj(init=srs)
except:
pass | 33.308824 | 123 | 0.583959 | [
"MIT"
] | relet/kivyMaps | WFSOverlayServer.py | 6,795 | Python |
'''
Created on 2012/09/03
@author: amake
'''
from __future__ import print_function
import os
import sys
import urllib
import codecs
from datetime import datetime
from xml.etree import ElementTree
import putio
CACHE_FILE = "cache.txt"
FEEDS_FILE = "feeds.txt"
DEBUG = True
PUTIOAPI = None
# Stupid CloudFlare decided to block "non-standard" browsers.
# Spoofing the user-agent gets around it.
class CustomURLopener(urllib.FancyURLopener):
    # Parentheses are required so the two literals concatenate into one string;
    # without them the second line is a no-op statement and the UA string is cut short.
    version = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) '
               'AppleWebKit/536.26.17 (KHTML like Gecko) Version/6.0.2 Safari/536.26.17')
urllib._urlopener = CustomURLopener()
def log(message):
if DEBUG:
print(message.encode('utf-8'))
class feedputter():
'''
Grab torrent files from an RSS feed.
'''
def __init__(self, feed):
'''
Constructor
'''
self.feed = feed
self.cache = []
if os.path.isfile(CACHE_FILE):
self.cache = [line.strip() for line in codecs.open(
CACHE_FILE, 'r', 'utf-8').readlines()]
def __get_items(self):
log("Fetching feed from: %s" % self.feed)
data = urllib.urlopen(self.feed).read()
tree = ElementTree.fromstring(data)
return tree.findall(".//item")
def save_torrent(self, link, target, title):
torrent = urllib.urlopen(link)
if (torrent.getcode() != 200):
log("Error " + torrent.getcode())
return False
with open(os.path.join(target, title + ".torrent"), "w") as out:
out.write(torrent.read())
return True
def putio(self, link, target, title):
api = putio.get_api(target_folder=target)
try:
api.add(link, putio.CALLBACK_URL + '?amk_type=tv')
except Exception as e:
print(e)
print('Skipping.')
return False
return True
def get_to(self, target, method):
'''
Fetch linked torrents and save to the specified output folder.
'''
for item in self.__get_items():
title = item.find('title').text.strip()
link = item.find('link').text
log("Found " + title)
if title in self.cache:
log("Already gotten. Skipping.")
continue
log("Getting ... ")
if not method(link, target, title):
continue
with codecs.open(CACHE_FILE, "a", "utf-8") as tmp:
tmp.write(title + "\n")
log("Done")
def usage():
print('Usage: {0} TARGET_DIR'.format(os.path.basename(__file__)))
def main():
if len(sys.argv) < 2:
usage()
sys.exit(1)
if not os.path.isdir(sys.argv[1]):
print('Directory not found or not a directory:', sys.argv[1])
print()
usage()
sys.exit(1)
os.chdir(os.path.dirname(__file__))
feeds = [line.strip() for line in open(FEEDS_FILE).readlines()]
log(datetime.now().isoformat(" ") +
" Starting feedputter with {0} feeds".format(len(feeds)))
for feed in feeds:
getter = feedputter(feed)
getter.get_to(sys.argv[1], getter.putio)
log(datetime.now().isoformat(" ") + " Finished feedputter")
if __name__ == "__main__":
main()
| 22.52381 | 77 | 0.578677 | [
"MIT"
] | amake/puttools-py | feedputter.py | 3,311 | Python |
# -*- coding: utf-8 -*-
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from .models import OembedVideoPlugin, OembedRichPlugin
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
class CMSOembedVideoPlugin(CMSPluginBase):
name = _('Video (embedded)')
model = OembedVideoPlugin
render_template = 'djangocms_oembed/plugins/video.html'
admin_preview = False
text_enabled = True
fieldsets = (
(None, {'fields': ('oembed_url', ('width', 'height',), 'autoplay', 'loop', 'show_related',)}),
('advanced', {'fields': ('type', 'provider', 'html', 'data'), 'classes': ['collapse']}),
)
readonly_fields = ('type', 'provider', 'html', 'data',)
def icon_src(self, instance):
return settings.STATIC_URL + u"cms/images/plugins/snippet.png"
plugin_pool.register_plugin(CMSOembedVideoPlugin)
class CMSOembedRichPlugin(CMSPluginBase):
name = _('Rich Content (embedded)')
model = OembedRichPlugin
render_template = 'djangocms_oembed/plugins/rich.html'
admin_preview = False
text_enabled = True
fieldsets = (
(None, {'fields': ('oembed_url',)}),
('advanced', {'fields': ('type', 'provider', 'html', 'data'), 'classes': ['collapse']}),
)
readonly_fields = ('type', 'provider', 'html', 'data',)
def icon_src(self, instance):
return settings.STATIC_URL + u"cms/images/plugins/snippet.png"
plugin_pool.register_plugin(CMSOembedRichPlugin)
| 35.209302 | 102 | 0.678996 | [
"BSD-3-Clause"
] | MatthewWilkes/djangocms-oembed | djangocms_oembed/cms_plugins.py | 1,514 | Python |
from typing import List
import json
import hashlib
from time import time
from base64 import b64decode, b64encode
import ecdsa
from config import ECDSA_CURVE
from .constants import BLOCK_COUNT_FREEZE_WALLET_LOTTERY_AFTER_WIN, DEVELOPER_KEY
from .transaction import Transaction
from .exceptions import (
ValidationError,
NonLotteryMemberError,
WalletLotteryFreezeError,
GenesisIsNotValidError,
NonSequentialBlockIndexError,
NonMatchingHashError
)
class Block:
def __init__(
self,
index,
previous_hash,
timestamp=None,
forger=None,
transactions: List[Transaction] = None,
signature=None,
**kwargs,
):
"""
Create block
:param index: the block index at the chain (0 for the genesis block and so on)
:param previous_hash: hash of previous block
:param timestamp: block creation time
:param forger: public_address of forger wallet
:param transactions: list of transactions
:param signature: signature of the block hash by the forger
"""
if timestamp is None:
timestamp = time()
if transactions is None:
transactions = []
self.index = index
self.previous_hash = previous_hash
self.timestamp = timestamp
self.forger = forger
self.transactions = transactions
self.signature = signature
@property
def forger_public_key(self) -> ecdsa.VerifyingKey:
forger_public_key_string = bytes.fromhex(self.forger)
return ecdsa.VerifyingKey.from_string(forger_public_key_string, curve=ECDSA_CURVE)
def _raw_data(self):
return {
"index": self.index,
"timestamp": self.timestamp,
"transactions": sorted([
transaction.to_dict() for transaction in self.transactions
], key=lambda t: t["nonce"]),
"previous_hash": self.previous_hash,
"forger": self.forger,
}
def hash(self):
"""
Calculate the block hash (block number, previous hash, transactions)
:return: String hash of block data (hex)
"""
block_dict = self._raw_data()
# We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes
block_string = json.dumps(block_dict, sort_keys=True).encode()
return hashlib.sha256(block_string).hexdigest()
def to_dict(self):
return {
**self._raw_data(),
"hash": self.hash(),
"signature": b64encode(self.signature).decode(),
}
def add_transaction(self, transaction: Transaction):
"""
Add transaction to block
:param transaction: Transaction object (see transaction.py)
        Note: the transaction is not validated here; Block.validate() checks all transactions.
:return: None
"""
self.transactions.append(transaction)
def is_signature_verified(self) -> bool:
"""
Check if block signature is valid
:return: bool
"""
try:
return self.forger_public_key.verify(self.signature, self.hash().encode())
except ecdsa.BadSignatureError:
return False
def create_signature(self, forger_private_address: str):
"""
Create block signature for this block
        :param forger_private_address: hex-encoded wallet private key string
:return: None
"""
forger_private_key_string = bytes.fromhex(forger_private_address)
forger_private_key = ecdsa.SigningKey.from_string(forger_private_key_string, curve=ECDSA_CURVE)
if forger_private_key.get_verifying_key() != self.forger_public_key:
raise ValueError("The forger is not the one signing")
self.signature = self.sign(forger_private_key)
def sign(self, forger_private_key: ecdsa.SigningKey):
return forger_private_key.sign(self.hash().encode())
def validate(self, blockchain_state, is_test_net=False):
"""
Validate block
1. check block index (is the next block in the blockchain state)
2. check previous hash (is the hash of the previous block)
3. check forger wallet (is lottery member?)
4. check block signature
5. validate transactions
:param is_test_net: if True ignore InsufficientBalanceError and NonLotteryMemberError
:param blockchain_state: Blockchain state object
:raises ValidationError
:return: None
"""
if self.index == 0 and blockchain_state.length == 0:
genesis_is_valid = self.forger == DEVELOPER_KEY and self.is_signature_verified()
if not genesis_is_valid:
raise GenesisIsNotValidError()
return
        # TODO: check in production if hash is equal to hard-coded hash
if self.index != blockchain_state.length:
raise NonSequentialBlockIndexError(
f"block index not sequential index: {self.index} chain: {blockchain_state.length}"
)
if self.previous_hash != blockchain_state.last_block_hash:
raise NonMatchingHashError("previous hash not match previous block hash")
forger_wallet = blockchain_state.wallets.get(self.forger, None)
if forger_wallet is None or forger_wallet.balance < 100:
if not is_test_net:
raise NonLotteryMemberError()
if not self.is_signature_verified():
raise ValidationError("invalid signature")
for transaction in self.transactions:
transaction.validate(
blockchain_state=blockchain_state, is_test_net=is_test_net
) # raises ValidationError
# TODO: Add timestamp validation
@classmethod
def from_dict(
cls,
index: int,
previous_hash,
forger,
transactions: dict,
signature: str,
**kwargs,
):
transactions = list(map(lambda t: Transaction.from_dict(**t), transactions))
signature = b64decode(signature.encode())
return cls(
index=index,
previous_hash=previous_hash,
forger=forger,
transactions=transactions,
signature=signature,
**kwargs,
)
def __getitem__(self, item):
return getattr(self, item)
| 35.10989 | 103 | 0.637715 | [
"MIT"
] | thewh1teagle/yoyocoin | src/blockchain/block.py | 6,390 | Python |
conf_linuxbridge_agent_ini = """[DEFAULT]
#
# From oslo.log
#
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
#debug = false
# If set to false, the logging level will be set to WARNING instead of the
# default INFO level. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#verbose = true
# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
# files, see the Python logging module documentation. Note that when logging
# configuration files are used then all logging configuration is set in the
# configuration file and other logging configuration options are ignored (for
# example, logging_context_format_string). (string value)
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>
# Defines the format string for %%(asctime)s in log records. Default:
# %(default)s . This option is ignored if log_config_append is set. (string
# value)
#log_date_format = %Y-%m-%d %H:%M:%S
# (Optional) Name of log file to send logging output to. If no default is set,
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>
# Uses logging handler designed to watch file system. When log file is moved or
# removed this handler will open a new log file with specified path
# instantaneously. It makes sense only if log_file option is specified and
# Linux platform is used. This option is ignored if log_config_append is set.
# (boolean value)
#watch_log_file = false
# Use syslog for logging. Existing syslog format is DEPRECATED and will be
# changed later to honor RFC5424. This option is ignored if log_config_append
# is set. (boolean value)
#use_syslog = false
# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
# Log output to standard error. This option is ignored if log_config_append is
# set. (boolean value)
#use_stderr = true
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages when context is undefined. (string
# value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Additional data to append to log message when logging level for the message
# is DEBUG. (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format. (string value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
# Defines the format string for %(user_identity)s that is used in
# logging_context_format_string. (string value)
#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
# The format for an instance that is passed with the log message. (string
# value)
#instance_format = "[instance: %(uuid)s] "
# The format for an instance UUID that is passed with the log message. (string
# value)
#instance_uuid_format = "[instance: %(uuid)s] "
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
[linux_bridge]
# (ListOpt) Comma-separated list of
# <physical_network>:<physical_interface> tuples mapping physical
# network names to the agent's node-specific physical network
# interfaces to be used for flat and VLAN networks. All physical
# networks listed in network_vlan_ranges on the server should have
# mappings to appropriate interfaces on each agent.
#
physical_interface_mappings = provider:{{ public_interface }}
# Example: physical_interface_mappings = physnet1:eth1
[vxlan]
# (BoolOpt) enable VXLAN on the agent
# VXLAN support can be enabled when agent is managed by ml2 plugin using
# linuxbridge mechanism driver.
enable_vxlan = True
#
# (IntOpt) use specific TTL for vxlan interface protocol packets
# ttl =
#
# (IntOpt) use specific TOS for vxlan interface protocol packets
# tos =
#
# (StrOpt) multicast group or group range to use for broadcast emulation.
# Specifying a range allows different VNIs to use different group addresses,
# reducing or eliminating spurious broadcast traffic to the tunnel endpoints.
# Ranges are specified by using CIDR notation. To reserve a unique group for
# each possible (24-bit) VNI, use a /8 such as 239.0.0.0/8.
# This setting must be the same on all the agents.
# vxlan_group = 224.0.0.1
#
# (StrOpt) Local IP address to use for VXLAN endpoints (required)
local_ip = {{ local_ip }}
#
# (BoolOpt) Flag to enable l2population extension. This option should be used
# in conjunction with ml2 plugin l2population mechanism driver (in that case,
# both linuxbridge and l2population mechanism drivers should be loaded).
# It enables plugin to populate VXLAN forwarding table, in order to limit
# the use of broadcast emulation (multicast will be turned off if kernel and
# iproute2 supports unicast flooding - requires 3.11 kernel and iproute2 3.10)
l2_population = True
[agent]
# Agent's polling interval in seconds
# polling_interval = 2
# (IntOpt) Set new timeout in seconds for new rpc calls after agent receives
# SIGTERM. If value is set to 0, rpc timeout won't be changed.
#
# quitting_rpc_timeout = 10
prevent_arp_spoofing = True
[securitygroup]
# Firewall driver for realizing neutron security group function
# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
# Example: firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
"""
| 42.703704 | 414 | 0.775513 | [
"MIT"
] | jiasir/playback | playback/templates/linuxbridge_agent_ini.py | 6,918 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('focus', '0006_auto_20160209_1200'),
]
operations = [
migrations.AlterField(
model_name='remedial',
name='focusRoom',
field=models.ForeignKey(help_text=b'The focusroom that this remedial is assigned to', to='focus.FocusRoom'),
),
]
| 24.947368 | 120 | 0.635021 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | openshiksha/openshiksha | focus/migrations/0007_auto_20160209_1201.py | 474 | Python |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models
class HrJobTask(models.Model):
_name = 'hr.job.task'
name = fields.Char(string='Description')
job_id = fields.Many2one(comodel_name='hr.job', string='Job')
categ_id = fields.Many2one(comodel_name='hr.task.categ', string='Category')
| 42.133333 | 79 | 0.624209 | [
"BSD-2-Clause"
] | aroodooteam/aro_hr | models/hr_job_task.py | 1,264 | Python |
import numpy as np
from sklearn import preprocessing
class DataTransformation:
"""
A generic class for the transformation of data
"""
def __init__(self):
pass
def transform_X(self, X):
"""
transforms X
:param
X: Input X
:return
transformed X
"""
raise NotImplementedError()
def transform_Y(self, Y):
"""
transforms Y
:param
Y: Input Y
:return
transformed Y
"""
raise NotImplementedError()
def untransform_X(self, X):
"""
Untransforms X to its original values
:param
X: transformed X
:return
untransformed X
"""
raise NotImplementedError()
def untransform_Y(self, Y):
"""
Untransforms Y
:param
Y: transformed Y
:return
         untransformed Y
"""
raise NotImplementedError()
def untransform_Y_var(self, Yvar):
raise NotImplementedError()
def untransform_NLPD(self, NLPD):
"""
        Untransforms NLPD to the original Y space
        :param
         NLPD: transformed NLPD
:return
untransformed NLPD
"""
raise NotImplementedError()
class IdentityTransformation:
"""
Identity transformation. No transformation will be applied to data.
"""
def __init__(self):
pass
def transform_X(self, X):
return X
def transform_Y(self, Y):
return Y
def untransform_X(self, X):
return X
def untransform_Y(self, Y):
return Y
def untransform_Y_var(self, Yvar):
return Yvar
@staticmethod
def get_transformation(Y, X):
return IdentityTransformation()
def untransform_NLPD(self, NLPD):
return NLPD
class MeanTransformation(object, DataTransformation):
"""
Only transforms Y as follows:
transformed Y = untransformed Y - mean(Y)
"""
def __init__(self, mean):
super(MeanTransformation, self).__init__()
self.mean = mean
def transform_X(self, X):
return X
def transform_Y(self, Y):
return Y - self.mean
def untransform_X(self, X):
return X
def untransform_Y(self, Y):
return Y + self.mean
def untransform_Y_var(self, Yvar):
return Yvar
def untransform_NLPD(self, NLPD):
return NLPD
@staticmethod
def get_transformation(Y, X):
return MeanTransformation(Y.mean(axis=0))
class MeanStdYTransformation(object, DataTransformation):
"""
Transforms only Y in a way that the transformed Y has mean = 0 and std =1
"""
def __init__(self, scalar):
super(MeanStdYTransformation, self).__init__()
self.scalar = scalar
def transform_X(self, X):
return X
def transform_Y(self, Y):
return self.scalar.transform(Y)
def untransform_X(self, X):
return X
def untransform_Y(self, Y):
return self.scalar.inverse_transform(Y)
def untransform_Y_var(self, Yvar):
return Yvar
def untransform_NLPD(self, NLPD):
return NLPD + np.hstack((np.array([np.log(self.scalar.std_).sum()]), np.log(self.scalar.std_)))
@staticmethod
def get_transformation(Y, X):
return MeanStdYTransformation(preprocessing.StandardScaler().fit(Y))
class MinTransformation(object, DataTransformation):
"""
Transforms only Y.
transformed Y = (Y - min(Y)) / (max(Y) - min(Y)) - 0.5
"""
def __init__(self, min, max, offset):
super(MinTransformation, self).__init__()
self.min = min
self.max = max
self.offset = offset
def transform_X(self, X):
return X
def transform_Y(self, Y):
return (Y-self.min).astype('float')/(self.max-self.min) - self.offset
def untransform_X(self, X):
return X
def untransform_Y(self, Y):
return (Y+self.offset)*(self.max-self.min) + self.min
def untransform_Y_var(self, Yvar):
return Yvar * (self.max-self.min) ** 2
def untransform_NLPD(self, NLPD):
return NLPD + np.log(self.max - self.min)
@staticmethod
def get_transformation(Y, X):
return MinTransformation(Y.min(), Y.max(), 0.5)
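# --- Usage sketch (editor's addition, not part of the original module) ---
# A small, hedged round-trip example for MinTransformation, matching the formula in
# its docstring; the data below is made up purely for illustration.
if __name__ == "__main__":
    Y_demo = np.array([[1.0], [2.0], [4.0]])
    t = MinTransformation.get_transformation(Y_demo, None)
    Y_scaled = t.transform_Y(Y_demo)
    assert np.allclose(t.untransform_Y(Y_scaled), Y_demo)
    print(Y_scaled.ravel())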
| 21.63 | 103 | 0.596163 | [
"Apache-2.0"
] | VirgiAgl/V_savigp | GP/data_transformation.py | 4,326 | Python |
# encoding: utf-8
from functools import partial
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import config
from base_model import resnet50
from seg_opr.seg_oprs import ConvBnRelu
class CPNet(nn.Module):
def __init__(self, out_planes, criterion, pretrained_model=None,
norm_layer=nn.BatchNorm2d):
super(CPNet, self).__init__()
self.backbone = resnet50(pretrained_model, norm_layer=norm_layer,
bn_eps=config.bn_eps,
bn_momentum=config.bn_momentum,
deep_stem=True, stem_width=64)
self.backbone.layer3.apply(partial(self._nostride_dilate, dilate=2))
self.backbone.layer4.apply(partial(self._nostride_dilate, dilate=4))
self.business_layer = []
self.context = ObjectContext(2048, 512, norm_layer)
self.head_layer = nn.Sequential(
ConvBnRelu(2048 + 1024, 512, 3, 1, 1,
has_bn=True,
has_relu=True, has_bias=False, norm_layer=norm_layer),
nn.Dropout2d(0.1, inplace=False),
nn.Conv2d(512, out_planes, kernel_size=1)
)
self.aux_layer = nn.Sequential(
ConvBnRelu(1024, 512, 3, 1, 1,
has_bn=True,
has_relu=True, has_bias=False, norm_layer=norm_layer),
nn.Dropout2d(0.1, inplace=False),
nn.Conv2d(512, out_planes, kernel_size=1)
)
self.business_layer.append(self.context)
self.business_layer.append(self.head_layer)
self.business_layer.append(self.aux_layer)
self.criterion = criterion
self.bce_criterion = nn.BCELoss(reduction='mean')
def forward(self, data, label=None, aux_label=None):
blocks = self.backbone(data)
fm, intra_sim_map = self.context(blocks[-1])
fm = self.head_layer(fm)
fm = F.interpolate(fm, scale_factor=8, mode='bilinear',
align_corners=True)
softmax_fm = F.log_softmax(fm, dim=1)
aux_fm = self.aux_layer(blocks[-2])
aux_fm = F.interpolate(aux_fm, scale_factor=8, mode='bilinear',
align_corners=True)
if label is not None:
main_loss = self.criterion(fm, label)
aux_loss = self.criterion(aux_fm, label)
intra_sim_loss = self.bce_criterion(intra_sim_map, aux_label)
loss = main_loss + 0.4 * aux_loss + intra_sim_loss
return loss
return softmax_fm
# @staticmethod
def _nostride_dilate(self, m, dilate):
if isinstance(m, nn.Conv2d):
if m.stride == (2, 2):
m.stride = (1, 1)
if m.kernel_size == (3, 3):
m.dilation = (dilate // 2, dilate // 2)
m.padding = (dilate // 2, dilate // 2)
else:
if m.kernel_size == (3, 3):
m.dilation = (dilate, dilate)
m.padding = (dilate, dilate)
class ObjectContext(nn.Module):
def __init__(self, in_channels, inner_channel, norm_layer=nn.BatchNorm2d):
super(ObjectContext, self).__init__()
self.in_channels = in_channels
self.inner_channel = inner_channel
self.reduce_conv = ConvBnRelu(self.in_channels, self.inner_channel,
1, 1, 0,
has_bn=True, has_relu=True,
has_bias=False, norm_layer=norm_layer)
self.intra_similarity_branch = nn.Sequential(
ConvBnRelu(self.inner_channel, self.inner_channel, 1, 1, 0,
has_bn=True, has_relu=True,
has_bias=False, norm_layer=norm_layer),
ConvBnRelu(self.inner_channel, 3600, 1, 1, 0,
has_bn=True, has_relu=False,
has_bias=False, norm_layer=norm_layer),
)
self.intra_post_conv = ConvBnRelu(self.inner_channel,
self.inner_channel,
1, 1, 0, has_bn=True, has_relu=True,
has_bias=False, norm_layer=norm_layer)
self.inter_post_conv = ConvBnRelu(self.inner_channel,
self.inner_channel,
1, 1, 0, has_bn=True, has_relu=True,
has_bias=False, norm_layer=norm_layer)
def forward(self, x):
b, h, w = x.size(0), x.size(2), x.size(3)
value = self.reduce_conv(x)
intra_similarity_map = self.intra_similarity_branch(value)
intra_similarity_map = intra_similarity_map.view(b, h * w, -1)
intra_similarity_map = intra_similarity_map.permute(0, 2, 1)
intra_similarity_map = torch.sigmoid(intra_similarity_map)
inter_similarity_map = 1 - intra_similarity_map
value = value.view(b, self.inner_channel, -1)
value = value.permute(0, 2, 1)
intra_context = torch.bmm(intra_similarity_map, value)
intra_mask = torch.ge(intra_similarity_map, 0.5).float()
intra_mask_count = intra_mask.sum(dim=-1, keepdim=True)
intra_mask_count = intra_mask_count.masked_fill_(intra_mask_count.eq(0),
1)
intra_context = intra_context.div(intra_mask_count)
intra_context = intra_context.permute(0, 2, 1).contiguous()
intra_context = intra_context.view(b, self.inner_channel, *x.size()[2:])
intra_context = self.intra_post_conv(intra_context)
inter_context = torch.bmm(inter_similarity_map, value)
inter_mask = torch.ge(inter_similarity_map, 0.5).float()
inter_mask_count = inter_mask.sum(dim=-1, keepdim=True)
inter_mask_count = inter_mask_count.masked_fill_(inter_mask_count.eq(0),
1)
inter_context = inter_context.div(inter_mask_count)
inter_context = inter_context.permute(0, 2, 1).contiguous()
inter_context = inter_context.view(b, self.inner_channel, *x.size()[2:])
inter_context = self.inter_post_conv(inter_context)
output = torch.cat([x, intra_context, inter_context], dim=1)
return output, intra_similarity_map
if __name__ == "__main__":
    model = CPNet(150, None)
print(model)
| 41.556962 | 80 | 0.581937 | [
"MIT"
] | akinoriosamura/TorchSeg-mirror | model/cpn/ade.cpn.R50_v1c.v7/network.py | 6,566 | Python |
consumer_key = 'YOUR CONSUMER KEY'
consumer_secret = 'YOUR CONSUMER SECRET'
access_token = 'YOUR ACCESS TOKEN'
access_token_secret = 'YOUR ACCESS TOKEN SECRET'
| 32 | 48 | 0.79375 | [
"MIT"
] | joshsisto/bitcoin_tweeter | bitcoin_tweeter/credentials.py | 160 | Python |
#!/usr/bin/env python3
# Copyright 2019 The University of Manchester UK
# Copyright 2019 RO-Crate contributors <https://github.com/ResearchObject/ro-crate/graphs/contributors>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script retrieves the schema.org properties to generate
the corresponding simplified @context for RO-Crate
adding our additional properties.
Run as:
./schema-context.py 0.3-DRAFT > ../docs/0.3-DRAFT/context.jsonld
"""
import sys
import json
import requests
from collections import OrderedDict
import urllib.request
# Our own version
ROCRATE_VERSION="1.1-DRAFT"
# Update version from http://schema.org/docs/releases.html
# NOTE: Breaks due to https://github.com/schemaorg/schemaorg/issues/2805
SCHEMA_VERSION="10.0"
# Update from https://bioschemas.org/profiles/Workflow/
BIOSCHEMA_WORKFLOW_PROFILE = "https://bioschemas.org/profiles/ComputationalWorkflow/0.5-DRAFT-2020_07_21"
BIOSCHEMA_WORKFLOW_NS = "https://bioschemas.org/ComputationalWorkflow"
BIOSCHEMA_FORMAL_PARAMETER_NS = "https://bioschemas.org/FormalParameter"
BIOSCHEMA_FORMAL_PARAMETER_PROFILE = "https://bioschemas.org/profiles/FormalParameter/0.1-DRAFT-2020_07_21"
def main():
#url="http://schema.org/version/%s/schemaorgcontext.jsonld" % SCHEMA_VERSION
# Workaround for https://github.com/schemaorg/schemaorg/issues/2805
url="https://raw.githubusercontent.com/schemaorg/schemaorg/V%s-release/data/releases/%s/schemaorgcontext.jsonld" % (SCHEMA_VERSION, SCHEMA_VERSION)
with urllib.request.urlopen(url) as f:
schema = json.load(f)
if len(sys.argv) > 2:
version = sys.argv[1]
tag = sys.argv[2]
elif len(sys.argv) > 1:
tag = version = sys.argv[1]
else:
tag = version = ROCRATE_VERSION
schemakeys = list(schema["@context"].keys())
schemakeys.sort() # they are usually sorted anyway
j = OrderedDict()
j["@id"] = "https://w3id.org/ro/crate/%s/context" % version
j["name"] = "RO-Crate JSON-LD Context",
j["version"] = tag
j["url"] = {"@id": "https://w3id.org/ro/crate/%s" % version}
j["schemaVersion"] = {"@id": "http://schema.org/version/%s/" % SCHEMA_VERSION}
j["isBasedOn"] = [
{"@id": "http://schema.org/version/%s/" % SCHEMA_VERSION},
{"@id": "https://pcdm.org/2016/04/18/models"},
{"@id": BIOSCHEMA_WORKFLOW_PROFILE },
{"@id": BIOSCHEMA_FORMAL_PARAMETER_PROFILE }
]
j["license"] = {"@id": "https://creativecommons.org/publicdomain/zero/1.0/"}
context = OrderedDict()
j["@context"] = context
for k in schemakeys:
if ":" in k: # URL like https://www.w3.org/wiki/WebSchemas/SchemaDotOrgSources#TP
continue
if "@" in k: # @vocab?
continue
definition = schema["@context"][k]
if not "@id" in definition or isinstance(definition, str):
continue # bibo etc.
context[k] = schema["@context"][k]["@id"].replace("schema:", "http://schema.org/")
context.update(ADDITIONAL)
json.dump(j, sys.stdout, ensure_ascii=False, indent=5) # indent4 to match existing!
print() ## newline
# Ordered so we keep a somewhat ordered presentation in the JSON
ADDITIONAL = OrderedDict([
# This list should correspond to listing in
# https://researchobject.github.io/ro-crate/0.3-DRAFT/#additional-metadata-standards
("File", "http://schema.org/MediaObject"),
("path", "http://schema.org/contentUrl"),
("Journal", "http://schema.org/Periodical"),
("cite-as", "https://www.w3.org/ns/iana/link-relations/relation#cite-as"),
("hasFile", "http://pcdm.org/models#hasFile"),
("hasMember", "http://pcdm.org/models#hasMember"),
("RepositoryCollection", "http://pcdm.org/models#Collection"),
("RepositoryObject", "http://pcdm.org/models#object"),
# Temporary namespace for properties/types
# proposed https://bioschemas.org/profiles/Workflow/ draft 0.5
# Remove if/when added to schema.org release!
## BEGIN
("ComputationalWorkflow", BIOSCHEMA_WORKFLOW_NS),
("input", BIOSCHEMA_WORKFLOW_NS + "#input"),
("output", BIOSCHEMA_WORKFLOW_NS + "#output"),
("FormalParameter", BIOSCHEMA_FORMAL_PARAMETER_NS),
# https://github.com/schemaorg/schemaorg/issues/383#issuecomment-651040576
("funding", "http://schema.org/funding"),
## END
("wasDerivedFrom", "http://www.w3.org/ns/prov#wasDerivedFrom"),
("importedFrom", "http://purl.org/pav/importedFrom"),
("importedOn", "http://purl.org/pav/importedOn"),
("importedBy", "http://purl.org/pav/importedBy"),
("retrievedFrom", "http://purl.org/pav/retrievedFrom"),
("retrievedOn", "http://purl.org/pav/retrievedOn"),
("retrievedBy", "http://purl.org/pav/retrievedBy"),
("conformsTo", "http://purl.org/dc/terms/conformsTo"),
("@label", "http://www.w3.org/2000/01/rdf-schema#label"),
("pcdm", "http://pcdm.org/models#"),
("bibo", "http://purl.org/ontology/bibo/"),
("cc", "http://creativecommons.org/ns#"),
("dct", "http://purl.org/dc/terms/"),
("foaf", "http://xmlns.com/foaf/0.1/"),
("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#"),
("rdfa", "http://www.w3.org/ns/rdfa#"),
("rdfs", "http://www.w3.org/2000/01/rdf-schema#"),
("schema", "http://schema.org/"),
("frapo", "http://purl.org/cerif/frapo/"),
("rel", "https://www.w3.org/ns/iana/link-relations/relation#"),
("pav", "http://purl.org/pav/"),
("prov", "http://www.w3.org/ns/prov#"),
("wfdesc", "http://purl.org/ro/wfdesc#"),
("wfprov", "http://purl.org/ro/wfprov#"),
("roterms", "http://purl.org/ro/roterms#"),
("wf4ever", "http://purl.org/ro/wf4ever#"),
# Disabled, see https://github.com/ResearchObject/ro-crate/pull/73
# ("@base", None)
])
if __name__=="__main__":
if "-v" in sys.argv or "--version" in sys.argv:
print("schema-context.py %s" % ROCRATE_VERSION)
print("schema.org %s" % SCHEMA_VERSION)
sys.exit(0)
elif "-h" in sys.argv or "--help" in sys.argv:
print("schema-context.py [VERSION] [TAG]")
print("")
print("Generates context.jsonld from schema.org and additional terms")
print(" VERSION is RO-Crate Specification version (default: %s)" % ROCRATE_VERSION)
print(" TAG is RO-Crate Semantic Versioning tag (default same as VERSION)")
sys.exit(0)
else:
main()
| 40.511364 | 151 | 0.638289 | [
"Apache-2.0"
] | bedroesb/ro-crate | scripts/schema-context.py | 7,130 | Python |
import struct
import socket
import ipaddress
from .utils import calculate_checksum
IPV4_HEAD_FMT="!BBHHHBBHII" #H is unsigned short (2 bytes) ! is for network (big-endian)
class IPV4Datagram:
"""
This class contains 20 bytes IPV4 Datagram
https://en.wikipedia.org/wiki/IPv4
|0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|
---------------------------------------------------------------------------------------
|version| IHL | DSCP | ECN | Total Length |
---------------------------------------------------------------------------------------
| identification | flags | Fragemnt Offset |
---------------------------------------------------------------------------------------
| TTL | Protocol | Header Checksum |
---------------------------------------------------------------------------------------
| Source Ip Address |
---------------------------------------------------------------------------------------
| Destination Ip Address |
---------------------------------------------------------------------------------------
"""
def __init__(self, source_ip="1.1.1.1",destination_ip="1.1.1.1" , version=4, ihl=5, tos=0,identification=54321,fragment_offset = 0,
ttl=253,protocol = socket.IPPROTO_UDP,data='', checksum=0):
self.version = version
self.ihl = ihl
self.version_ihl = (self.version << 4) + self.ihl
self.tos = tos
self.identification=identification
self.fragment_offset = fragment_offset
self.ttl = ttl
self.protocol = protocol
self.checksum = checksum
self.source_ip =int(ipaddress.IPv4Address( source_ip )) # convert into integer
self.destination_ip = int(ipaddress.IPv4Address(destination_ip ))
self.data = data
self.length= 4 * self.ihl + len(self.data)
    def __repr__(self):
        return 'IPV4Datagram({}, {}, ({}, {}))'.format(self.source_ip, self.destination_ip, self.checksum, self.data)
def pack(self):
ipv4_header = struct.pack(IPV4_HEAD_FMT, self.version_ihl,self.tos,self.length, self.identification,
self.fragment_offset, self.ttl, self.protocol, self.checksum, self.source_ip, self.destination_ip)
self.checksum = calculate_checksum(ipv4_header)
ipv4_header = struct.pack(IPV4_HEAD_FMT, self.version_ihl,self.tos,self.length, self.identification,
self.fragment_offset, self.ttl, self.protocol, self.checksum, self.source_ip, self.destination_ip)
return ipv4_header
def unpack(self, buffer):
ipv4_header_size = struct.calcsize(IPV4_HEAD_FMT)
ipv4_header_packed = buffer[:ipv4_header_size]
ipv4_header_unpacked = struct.unpack(IPV4_HEAD_FMT,ipv4_header_packed)
self.version_ihl = ipv4_header_unpacked[0]
self.ihl = self.version_ihl & 0xf
self.version = self.version_ihl >> 4
self.tos = ipv4_header_unpacked[1]
self.length = ipv4_header_unpacked[2]
self.identification = ipv4_header_unpacked[3]
self.fragment_offset = ipv4_header_unpacked[4]
self.ttl = ipv4_header_unpacked[5]
self.protocol = ipv4_header_unpacked[6]
self.checksum = ipv4_header_unpacked[7]
self.source_ip = str(ipaddress.IPv4Address(ipv4_header_unpacked[8] ))
self.destination_ip= str(ipaddress.IPv4Address(ipv4_header_unpacked[9] ))
self.data = buffer[ipv4_header_size:]
#print ("source ip == " + str( ipaddress.IPv4Address(self.source_ip)))
#print ("destination ip == " + str( ipaddress.IPv4Address(self.destination_ip)))
#print ("checksum = "+ str(self.checksum))
#print ("ttl == " + str(self.ttl))
| 48.554217 | 135 | 0.531762 | [
"MIT"
] | UsamaMehboob/RawSocketsPython | Raw_Socket_Protos/rawIPV4.py | 4,030 | Python |
# -*- coding:utf-8 -*-
"""
Copyright (c) 2013-2016 SYPH, All Rights Reserved.
-----------------------------------------------------------
Author: S.JunPeng
Date: 2016/12/22
Change Activity:
"""
import logging
import json
from vendor.utils.encrypt import Cryption
from apps.common.models import ClientOverview
from apps.remote.models import FeatureFieldRel
from apps.etl.context import ApplyContext
from vendor.errors.api_errors import *
logger = logging.getLogger('apps.featureapi')
class Judger(object):
"""
1.authentication (_check_identity)
2.data decryption (_decrypt)
3.check availability of arguments (_args_useful_check)
4.throw the Exceptions
5.finally check all works
"""
def __init__(self, client_code, data):
self.client_code = client_code
self.client_id = ''
self.client_secret = ''
self.des_key = ''
self.origin_data = data
self.cryption = Cryption()
self.apply_id = ''
self.target_features = []
self.arguments = {}
self.ret_msg = []
def _check_sum(self):
if self.client_id and self.client_secret and self.des_key and self.target_features and self.arguments \
and (len(self.target_features) == len(self.ret_msg)):
return True
else:
return False
def _check_identity(self):
client_package = ClientOverview.objects.filter(client_code=self.client_code)
if not client_package:
logger.error('Response from the function of `judge._check_identity`, error_msg=%s, rel_err_msg=%s'
% (UserIdentityError.message, 'No data in ClientOverview'), exc_info=True)
raise UserIdentityError # E02
client_package = client_package[0]
self.client_id = client_package.client_id
self.client_secret = client_package.client_secret
self.des_key = client_package.des_key
def encrypt(self, data):
json_data = json.dumps(data)
des_data = Cryption.aes_base64_encrypt(json_data, self.des_key)
return des_data
def _decrypt(self):
try:
json_data = Cryption.aes_base64_decrypt(self.origin_data, self.des_key)
message = json.loads(json_data)
except Exception as e:
logger.error('Response from the function of `judge._decrypt`, error_msg=%s, rel_err_msg=%s'
% (EncryptError.message, e.message), exc_info=True)
raise EncryptError # E03
self.apply_id = message.get('apply_id', None)
if not self.apply_id:
logger.error('Response from the function of `judge._decrypt`, error_msg=%s, rel_err_msg=%s'
% (GetApplyIdError.message, "Missing apply_id in the post_data"), exc_info=True)
raise GetApplyIdError # E04
self.target_features = message.get('res_keys', None)
if not self.target_features:
logger.error('Response from the function of `judge._decrypt`, error_msg=%s, rel_err_msg=%s'
% (GetResKeysError.message, "Missing res_keys in the post_data"), exc_info=True)
raise GetResKeysError # E05
apply_base = ApplyContext(self.apply_id)
self.arguments = apply_base.load()
if not self.arguments:
logger.error('Response from the function of `judge._decrypt`, error_msg=%s, rel_err_msg=%s'
% (GetArgumentsError.message, "Missing arguments in the post_data"), exc_info=True)
raise GetArgumentsError # E06
def _args_useful_check(self):
"""
need sql which mapping the target features and arguments
:return:
"""
arg_msg_list = FeatureFieldRel.objects.filter(
feature_name__in=self.target_features,
is_delete=False,
)
for arg_msg in arg_msg_list:
if arg_msg.raw_field_name in self.arguments.keys():
if self.ret_msg and (arg_msg.feature_name == (self.ret_msg[-1])['target_field_name']):
sub_msg = self.ret_msg[-1]
if arg_msg.feature_name == sub_msg['target_field_name']:
sub_msg['arguments'].update({
arg_msg.raw_field_name: self.arguments[arg_msg.raw_field_name],
})
self.ret_msg[-1] = sub_msg
else:
temp_msg = {
'data_identity': arg_msg.data_identity,
'target_field_name': arg_msg.feature_name,
'arguments': {
arg_msg.raw_field_name: self.arguments[arg_msg.raw_field_name],
}
}
self.ret_msg.append(temp_msg)
else:
logger.error('Response from the function of `judge._args_useful_check`, error_msg=%s, rel_err_msg=%s'
% (ArgumentsAvailableError.message, "Arguments are not enough to get all res_keys"),
exc_info=True)
raise ArgumentsAvailableError # E07
def work_stream(self):
self._check_identity()
self._decrypt()
self._args_useful_check()
return self._check_sum()
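# Illustrative usage sketch (not part of the original module; the client code and the
# encrypted payload are placeholders, and `dispatch` is a hypothetical caller). It shows
# the order of the four steps listed in the class docstring:
#
#   judger = Judger(client_code="demo_client", data=encrypted_post_body)
#   if judger.work_stream():
#       # judger.ret_msg holds one entry per data source:
#       # {"data_identity": ..., "target_field_name": ..., "arguments": {...}}
#       dispatch(judger.ret_msg)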
| 39.814815 | 117 | 0.596093 | [
"MIT"
] | diudiu/featurefactory | procuratorate/dataocean_judger.py | 5,375 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pytest
import os
import numpy as np
import glob
from psrsigsim.signal.fb_signal import FilterBankSignal
from psrsigsim.pulsar.pulsar import Pulsar
from psrsigsim.pulsar.portraits import DataPortrait
from psrsigsim.pulsar.profiles import DataProfile
from psrsigsim.ism.ism import ISM
from psrsigsim.telescope.telescope import Telescope
from psrsigsim.telescope.receiver import Receiver
from psrsigsim.telescope.backend import Backend
from psrsigsim.io.psrfits import PSRFITS
from psrsigsim.utils.utils import make_quant
from psrsigsim.io.txtfile import TxtFile
from psrsigsim.simulate.simulate import Simulation
@pytest.fixture
def j1713_profile():
"""
Numpy array of J1713+0747 profile.
"""
path = 'psrsigsim/data/J1713+0747_profile.npy'
return np.load(path)
@pytest.fixture
def PSRfits():
"""
Fixture psrfits class
"""
fitspath = "data/test.fits"
tempfits = "data/B1855+09.L-wide.PUPPI.11y.x.sum.sm"
return PSRFITS(path=fitspath, template=tempfits, fits_mode='copy')
@pytest.fixture
def param_dict():
"""
Fixture parameter dictionary.
"""
pdict = {'fcent' : 430,
'bandwidth' : 100,
'sample_rate' : 1.5625,
'dtype' : np.float32,
'Npols' : 1,
'Nchan' : 64,
'sublen' : 2.0,
'fold' : True,
'period' : 1.0,
'Smean' : 1.0,
'profiles' : [0.5, 0.5, 1.0], # Gaussian
'tobs' : 4.0,
'name' : 'J0000+0000',
'dm' : 10.0,
'tau_d' : 50e-9,
'tau_d_ref_f' : 1500.0,
'aperture' : 100.0,
'area' : 5500.0,
'Tsys' : 35.0,
'tscope_name' : "TestScope",
'system_name' : "TestSys",
'rcvr_fcent' : 430,
'rcvr_bw' : 100,
'rcvr_name' : "TestRCVR",
'backend_samprate' : 1.5625,
'backend_name' : "TestBack",
'tempfile' : None,
'parfile' : None,
}
return pdict
@pytest.fixture
def simulation():
"""
Fixture Simulation class. Cannot be the only simulation tested.
"""
sim = Simulation(fcent = 430,
bandwidth = 100,
sample_rate = 1.0*2048*10**-6,
dtype = np.float32,
Npols = 1,
Nchan = 64,
sublen = 2.0,
fold = True,
period = 1.0,
Smean = 1.0,
profiles = None,
tobs = 4.0,
name = 'J0000+0000',
dm = 10.0,
tau_d = 50e-9,
tau_d_ref_f = 1500.0,
aperture = 100.0,
area = 5500.0,
Tsys = 35.0,
tscope_name = "TestScope",
system_name = "TestSys",
rcvr_fcent = 430,
rcvr_bw = 100,
rcvr_name ="TestRCVR",
backend_samprate = 1.5625,
backend_name = "TestBack",
tempfile = "data/B1855+09.L-wide.PUPPI.11y.x.sum.sm",
parfile = None,
psrdict = None)
return sim
def test_initsim(param_dict):
"""
Test initializing the simulation from dictionary, parfile
"""
sim = Simulation(psrdict = param_dict)
with pytest.raises(NotImplementedError):
sim2 = Simulation(parfile = "testpar.par")
def test_initsig(simulation):
"""
Test init_signal function.
"""
# Test from input params
simulation.init_signal()
# Test from template file
simulation.init_signal(from_template = True)
def test_initprof(simulation, j1713_profile):
"""
Test init_profile function.
"""
# Test no input
simulation.init_profile()
# Test function input
with pytest.raises(NotImplementedError):
def gprof(x, p0):
return p0[0]* np.exp(-0.5*((x-p0[1])/(p0[2]))**2)
simulation._profiles = gprof
simulation.init_profile()
# Test Gaussian as input
simulation._profiles = [0.5, 0.5, 1.0]
simulation.init_profile()
# Test data array as input
simulation._profiles = j1713_profile
simulation.init_profile()
# Test array that's not long enough
with pytest.raises(RuntimeError):
simulation._profiles = [0.5, 0.5]
simulation.init_profile()
# Test profile class as input
pr = DataProfile(j1713_profile,phases=None)
print(type(pr), pr)
simulation._profiles = pr
simulation.init_profile()
def test_initpsr(simulation):
"""
Test init_pulsar function.
"""
simulation.init_pulsar()
def test_initism(simulation):
"""
Test init_ism function.
"""
simulation.init_ism()
def test_inittscope(simulation):
"""
Test init_telescope function.
"""
# Test init GBT
simulation._tscope_name = "GBT"
simulation.init_telescope()
# Test init Arecibo
simulation._tscope_name = "Arecibo"
simulation.init_telescope()
# Test input telescope
simulation._tscope_name = "TestScope"
simulation.init_telescope()
# Test list of systems for telescope
simulation._system_name = ["Sys1", "Sys2"]
simulation._rcvr_fcent = [430, 800]
simulation._rcvr_bw = [100, 200]
simulation._rcvr_name = ["R1", "R2"]
simulation._backend_samprate = [1.5625, 12.5]
simulation._backend_name = ["B1", "B2"]
simulation.init_telescope()
# And the catch with multiple systems
with pytest.raises(RuntimeError):
simulation._backend_name = ["B1", "B2", "B3"]
simulation.init_telescope()
def test_simulate(simulation):
"""
Test simulate function.
"""
simulation.simulate()
@pytest.mark.filterwarnings('ignore::fitsio.FITSRuntimeWarning')
def test_savesim(simulation, PSRfits):
"""
Test save simulation function.
"""
simulation._Nchan = 1
simulation._tobs = 2.0
#S = PSRfits.make_signal_from_psrfits()
#simulation._tobs = PSRfits.tsubint.value*PSRfits.nsubint
simulation.simulate(from_template = True)
# Try pdv format
simulation.save_simulation(out_format = "pdv")
# Try psrfits format
simulation.save_simulation(out_format = "psrfits", phaseconnect = False)
os.remove("sim_fits.fits")
# Try psrfits format with phaseconnect = True
#parfile = "data/test_parfile.par"
#simulation._parfile = parfile
#simulation.save_simulation(out_format = "psrfits", phaseconnect = True)
#os.remove("sim_fits.fits")
dfs = glob.glob("simfits*")
for df in dfs:
os.remove(df)
# Try psrfits with runtime error
# Try wrong output file type
with pytest.raises(RuntimeError):
simulation.save_simulation(out_format = "wrong_fmt")
simulation._tempfile = None
simulation.save_simulation(out_format = "psrfits")
| 30.229437 | 76 | 0.597594 | [
"MIT"
] | bshapiroalbert/PsrSigSim | tests/test_simulate.py | 6,983 | Python |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 10 22:59:51 2019
@author: Sravan
"""
import csv
import numpy as np
from scipy.spatial.distance import pdist, squareform, euclidean, cdist
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import scipy.integrate as integrate
import matplotlib.animation as animation
"""
Variables: Wind speed, Air traffic (# of drones), Obstacles (Trees, Buildings)
Fixed: Distance, Air Resistance, Gravity, Battery level
Rules: Drone Speed (Air traffic, Wind speed, Battery level), Collisions (Drone position)
Study: Time, Speed
Movement: v_air = sqrt(mg/(nAρ)), p = 1.22 kg m^-3, A = 1 m^2
½cρAv2 = mgtanθ, c = drag coefficient
P = ½ρnAv_air(v_air2 – v2sin2θ)
Collisions: 1) Drone - Increase/Decrease Speed, 2) Change path - increasing elevation
https://www.research-drone.com/en/extreme_climb_rate.html
https://en.wikipedia.org/wiki/Amazon_Prime_Air
https://homepages.abdn.ac.uk/nph120/meteo/DroneFlight.pdf
"""
class ParticleBox:
"""Orbits class
init_state is an [N x 6] array, where N is the number of particles:
[[xi1, yi1, zi1, xf1, yf1, zf1, vx1, vy1, vz1, t1],
[xi2, yi2, zi2, xf2, yf2, zf2, vx2, vy2, vz2, t2],
... ]
bounds is the size of the box: [xmin, xmax, ymin, ymax, zmin, zmax]
"""
def __init__(self,
drones = 1,
wind = [0, 0, 0],
obstacles = 0,
bounds = [-32000, 32000, -32000, 32000, 0, 150],
size = 1.5,
max_height = 122,
max_speed = 22.34,
acc = 7,
M = 25.0,
G = 9.81):
self.drones = drones
self.wind = wind
self.size = size
self.G = G
self.max_height = max_height
self.max_speed = max_speed
self.acc_vert = acc
self.acc_vert_eff = acc + G
self.acc_hor = acc
self.obstacles = 0
self.obstacles_size = 40
self.time_elapsed = 0
self.bounds = bounds
np.random.seed(0)
init_state = np.random.random((drones, 10))
init_state[:, :2] -= 0.5
init_state[:, :2] *= bounds[1]*2
init_state[:, 2:] = 0.0
for i in range(len(init_state)):
vecs = [64000.0, 64000.0]
while vecs[0] > bounds[1] or vecs[0] < bounds[0] or vecs[1] > bounds[3] or vecs[1] < bounds[2]:
vecs = np.random.standard_normal(2)
mags = np.linalg.norm(vecs)
vecs /= mags
vecs *= 16000
vecs += init_state[i, :2]
init_state[i, 3:5] =vecs
if obstacles > 0:
np.random.seed(1)
obs_state = np.random.random((obstacles, 3))
obs_state[:, :3] -= 0.5
obs_state[:, :2] *= bounds[1]*2
obs_state[:, 2] *= bounds[5]*2
self.init_state = np.asarray(init_state, dtype=float)
#self.obs_state = np.asarray(obs_state, dtype=float)
self.M = M * np.ones(self.init_state.shape[0])
self.state = self.init_state.copy()
#update velocity
self.state[:, 6] = self.wind[0]
self.state[:, 7] = self.wind[1]
self.state[:, 8] = self.wind[2]
def step(self, dt):
"""step once by dt seconds"""
self.time_elapsed += dt
# find distance to goal
D = cdist(self.state[:, :3], self.state[:, 3:6], 'euclidean')
ind, din = np.where(D > 122)
uniqua = (ind == din)
ind = ind[uniqua]
# update velocities of individual drones
for i in zip(ind):
#velocity vector
v = self.state[i, 8]
v_avg = v
a_ver = self.acc_vert
a_ver_eff = self.acc_vert_eff
height = self.max_height - self.state[i, 2]
print(height)
if height > 0:
n = 1
if v > 0:
n = v / abs(v)
stop = n * v**2/(2 * a_ver)
t_end = abs(v / a_ver)
b1 = (v**2 + t_end**2)**(0.5)
b2 = ((v + n * a_ver * dt)**2 + (t_end + dt)**2)**(0.5)
s1 = ((a_ver * dt)**2 + dt**2)**(0.5)
s2 = dt * 2
P = (b2 - b1) + s1 + s2
t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)
h = 2 * t / (b2 - b1)
area = n * (t + (b2 - b1) * h)
if (t_end <= dt and stop > (height - area)):
v_avg = 0
self.state[i, 8] = 0
self.state[i, 2] = self.max_height
elif (stop > (height - area)):
t_max = 0
if stop < height:
a = 2 * (a_ver)**2
b = 4 * (a_ver) * v
c = v**2 - 2 * a_ver * height
t_max = (-b + (b**2 - 4 * a * c)**(0.5)) / (2 * a)
v_max = v + a_ver * (t_max / dt)
v_end = 2 * v_max - v - a_ver * dt
v_avg = ((v_max + v) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)
self.state[i, 8] = v_end
else:
v_avg = v + a_ver * dt / 2
self.state[i, 8] += a_ver * dt
elif height < 0:
n = v / abs(v)
stop = n * v**2/(2 * a_ver_eff)
t_end = abs(v / a_ver_eff)
b1 = (v**2 + t_end**2)**(0.5)
b2 = ((v + n * a_ver_eff * dt)**2 + (t_end + dt)**2)**(0.5)
s1 = ((a_ver_eff * dt)**2 + dt**2)**(0.5)
s2 = dt * 2
P = (b2 - b1) + s1 + s2
t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)
h = 2 * t / (b2 - b1)
area = n * (t + (b2 - b1) * h)
if (t_end <= dt and abs(stop) <= abs(height)):
v_avg = (v / 2) * (t_end / dt)
self.state[i, 8] = v + a_ver_eff * t_end
elif (stop < (height - area)):
v_max = (height * (2 * a_ver_eff))**(0.5)
t_max = (v_max - v)/a_ver_eff
v_end = 2 * v_max - v - a_ver_eff * dt
v_avg = ((v_max + v) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)
self.state[i, 8] = v_end
else:
v_avg = v - a_ver_eff * dt / 2
self.state[i, 8] = v - a_ver_eff * dt
else:
self.state[i, 8] += 0 * dt
self.state[i, 2] += v_avg * dt
# unit vector
r = self.state[i, 3:5] - self.state[i, :2]
m = np.linalg.norm(r)
u = r / m
#accelearting horizontal
a_hor = self.acc_hor
v_hor = self.state[i, 6:8]
h = np.linalg.norm(v_hor)
stop = h**2/(2 * a_hor)
t_end = h / a_hor
b1 = (h**2 + t_end**2)**(0.5)
b2 = ((h + a_hor * dt)**2 + (t_end + dt)**2)**(0.5)
s1 = ((a_hor * dt)**2 + dt**2)**(0.5)
s2 = dt*2
P = (b2 - b1) + s1 + s2
t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)
s = 2 * t / (b2 - b1)
area = (t + (b2 - b1) * s)
if (t_end <= dt and stop < area):
v_hor = (h / 2) * (t_end / dt)
self.state[i, 6:8] = (h - (a_hor * t_end)) * u
elif (stop > (m - area)):
v_max = (m * (2 * a_hor))**(0.5)
t_max = (v_max - h)/a_hor
v_end = 2 * v_max - h - a_hor * dt
v_hor = ((v_max + h) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)
self.state[i, 6:8] = v_end * u
else:
v_hor = h + a_hor * dt / 2
self.state[i, 6:8] = (h + a_hor * dt) * u
self.state[i, :2] += (v_hor * dt) * u
#find drones hovering
done, fund = np.where(D <= 122)
uniquo = (done == fund)
done = done[uniquo]
        for i in zip(done):
print("here")
#velocity vector
v = self.state[i, 8]
v_avg = v
a_ver_eff = self.acc_vert_eff
#accelerating negative z
n = -1
if v < 0:
n = v / abs(v)
stop = n * v**2/(2 * a_ver_eff)
t_end = abs(v / a_ver_eff)
b1 = (v**2 + t_end**2)**(0.5)
b2 = ((v + n * a_ver_eff * dt)**2 + (t_end + dt)**2)**(0.5)
s1 = ((a_ver_eff * dt)**2 + dt**2)**(0.5)
s2 = dt * 2
P = (b2 - b1) + s1 + s2
t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)
h = 2 * t / (b2 - b1)
area = n * (t + (b2 - b1) * h)
if (t_end <= dt and stop > area):
v_avg = (v / 2) * (t_end / dt)
self.state[i, 8] = v + a_ver_eff * t_end
self.state[i, 9] = self.time_elapsed
elif (stop < (-self.state[i, 2] - area)):
v_max = ((-self.state[i, 2]) * (2 * a_ver_eff))**(0.5)
t_max = (v_max - v)/a_ver_eff
v_end = 2 * v_max - v - a_ver_eff * dt
v_avg = ((v_max + v) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)
self.state[i, 8] = v_end
else:
v_avg = v - a_ver_eff * dt / 2
self.state[i, 8] = v - a_ver_eff * dt
self.state[i, 2] += v_avg * dt
E = squareform(pdist(self.state[:, :3], 'euclidean'))
ind1, ind2 = np.where(E < (2 * self.size))
unique = (ind1 < ind2)
ind1 = ind1[unique]
ind2 = ind2[unique]
for i1, i2 in zip(ind1, ind2):
if (self.state[i1, 2] > self.state[i2, 2]):
self.state[i1, 8] += (self.acc_vert) * dt
self.state[i2, 8] -= (self.acc_vert_eff) * dt
else:
self.state[i1, 8] -= (self.acc_vert) * dt
self.state[i2, 8] += (self.acc_vert_eff) * dt
if self.obstacles > 0:
DO = np.vstack([self.state[:, :3].copy(), self.obs_state.copy()])
F = squareform(pdist(DO, 'euclidean'))
d_rone, obs = np.where(F < (2 * self.obstacles_size))
            unique = (d_rone < obs) & (obs >= self.drones)
d_rone = d_rone[unique]
obs = obs[unique]
for d, o in zip(d_rone, obs):
if (self.obs_state[o-self.drones, 2] < 110 and self.state[d, 2] < self.obs_state[o-self.drones, 2]):
self.state[d, 8] += self.acc_vert * dt
else:
r = self.state[d, 3:5] - self.state[d, :2]
ro = self.obs_state[o-self.drones, :2] - self.state[d, :2]
r_rel = np.cross(r, ro)
if (r_rel[2] > 0):
self.state[d, 6] += self.acc_hor * dt
self.state[d, 7] += self.acc_hor * dt
else:
self.state[d, 6] -= self.acc_hor * dt
self.state[d, 7] -= self.acc_hor * dt
#restrict velocity
        self.state[:, 6] = np.clip(self.state[:, 6], -self.max_speed + self.wind[0], self.max_speed + self.wind[0])
        self.state[:, 7] = np.clip(self.state[:, 7], -self.max_speed + self.wind[1], self.max_speed + self.wind[1])
#------------------------------------------------------------
# set up initial state
box = ParticleBox()
dt = 1. # 1 fps
#ani = animation.FuncAnimation(fig, animate, frames=600, interval=10, init_func=init)
for i in range(10):
box.step(dt)
#final = np.hstack([box.init_state[:, :3], box.state[:, 3:]])
#with open('people.csv', 'w') as writeFile:
# writer = csv.writer(writeFile)
# writer.writerows(final) #2d list
"""with open('initial.csv', 'w') as writeInit:
writer = csv.writer(writeInit)
writer.writerows(box.init_state)
writeInit.close()
"""
with open('final_2.csv', 'w') as writeFin:
writer = csv.writer(writeFin)
writer.writerows(box.init_state)
writer.writerows(box.state)
writeFin.close()
print(box.state) | 37.127907 | 116 | 0.432509 | [
"MIT"
] | SVJayanthi/DroneSimulation | drone_2.py | 12,781 | Python |
__author__ = 'hofmann'
__version__ = '0.0.2.1'
import os
from scripts.MetaDataTable.metadatatable import MetadataTable
from scripts.NcbiTaxonomy.ncbitaxonomy import NcbiTaxonomy
from scripts.Validator.validator import Validator
class TaxonomicProfile(Validator):
"""
Constructing taxonomic profiles from files with relative abundances.
"""
_taxonomic_profile_version = "0.9.1"
def __init__(self, taxonomy, logfile=None, verbose=True, debug=False):
"""
@param taxonomy: taxonomy handler
@type taxonomy: NcbiTaxonomy
@param logfile: file handler or file path to a log file
@type logfile: file | FileIO | StringIO | str
@param verbose: Not verbose means that only warnings and errors will be past to stream
@type verbose: bool
@param debug: Display debug messages
@type debug: bool
"""
super(TaxonomicProfile, self).__init__(label="TaxonomicProfile", logfile=logfile, verbose=verbose, debug=debug)
self._ranks = ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species', 'strain']
assert isinstance(taxonomy, NcbiTaxonomy)
self._taxonomy = taxonomy
self._filename_taxonomic_profile = "taxonomic_profile_{sample_index}.txt"
def write_taxonomic_profile_from_abundance_files(
self, metadata_table, list_of_file_paths, directory_output, sample_id=""):
"""
Write a taxonomic profile file for each relative abundance file
@param metadata_table: Contains metadata of all communities
@type metadata_table: MetadataTable
@param list_of_file_paths: List of abundance file paths
@type list_of_file_paths: list[str | unicode]
@param directory_output: Profiles are written in this directory
@type directory_output: str | unicode
@param sample_id: Identifier of a sample
@type sample_id: str | unicode
"""
metadata_table_tmp = MetadataTable(logfile=self._logfile, verbose=self._verbose)
for index_abundance, file_path in enumerate(list_of_file_paths):
community_abundance = metadata_table_tmp.parse_file(file_path, column_names=False)
file_path_output = os.path.join(directory_output, self._filename_taxonomic_profile.format(
sample_index=index_abundance))
with open(file_path_output, 'w') as stream_output:
self.write_taxonomic_profile(
community_abundance,
stream_output,
metadata_table,
sample_id)
def write_taxonomic_profile(self, community_abundance, stream_output, metadata_table, sample_id=""):
"""
Stream a taxonomic profile by list of relative abundances
@param community_abundance: list of relative abundances
@type community_abundance: generator[ dict[int|long|str|unicode, str|unicode] ]
@param stream_output: Output of taxonomic profile
@type stream_output: file | FileIO | StringIO
@param metadata_table: Contains metadata of all communities
@type metadata_table: MetadataTable
@param sample_id: Identifier of a sample
@type sample_id: str | unicode
"""
assert isinstance(metadata_table, MetadataTable)
genome_abundance = {}
total_abundance = 0.0
# for community in community_abundance:
# all_communities += community
for genome_id, abundance in community_abundance:
if genome_id in genome_abundance:
raise IOError("genome id '{}' is not unique!".format(genome_id))
genome_abundance[genome_id] = float(abundance) # *float(total_length)
total_abundance += genome_abundance[genome_id]
for key, value in genome_abundance.items():
genome_abundance[key] = value / total_abundance
self._stream_taxonomic_profile(stream_output, genome_abundance, metadata_table, sample_id)
def _stream_taxonomic_profile(self, stream_output, genome_id_to_percent, metadata_table, sample_id=""):
"""
Stream a taxonomic profile by list of percentages by genome id
@param stream_output: Output of taxonomic profile
@type stream_output: file | FileIO | StringIO
@param genome_id_to_percent: Percentage for each genome id
@type genome_id_to_percent: dict[str|unicode, float]
@param metadata_table: Contains metadata of all communities
@type metadata_table: MetadataTable
@param sample_id: Identifier of a sample
@type sample_id: str | unicode
"""
strain_id_to_genome_id = {}
genome_id_to_strain_id = {}
genome_id_to_taxid = metadata_table.get_map(key_column_name="genome_ID", value_column_name="NCBI_ID")
genome_id_to_otu = metadata_table.get_map(key_column_name="genome_ID", value_column_name="OTU")
column_genome_id = metadata_table.get_column("genome_ID")
if not metadata_table.has_column("strain_id"):
column_strain_id = metadata_table.get_empty_column()
else:
column_strain_id = metadata_table.get_column("strain_id")
genome_id_to_strain_id = metadata_table.get_map(key_column_name="genome_ID", value_column_name="strain_id")
genome_id_to_lineage = self._get_genome_id_to_lineage(
genome_id_to_percent.keys(), genome_id_to_taxid, strain_id_to_genome_id, genome_id_to_strain_id)
percent_by_rank_by_taxid = self._get_percent_by_rank_by_taxid(genome_id_to_lineage, genome_id_to_percent)
# add strain_id to metadata
#for row_index, genome_id in enumerate(column_genome_id):
# column_strain_id[row_index] = genome_id_to_strain_id[genome_id]
#assert len(column_strain_id) == len(set(column_strain_id))
#metadata_table.insert_column(column_strain_id, "strain_id")
# stream taxonomic profile
self._stream_tp_header(stream_output, sample_id)
self._stream_tp_rows(stream_output, percent_by_rank_by_taxid, strain_id_to_genome_id, genome_id_to_otu)
def _get_genome_id_to_lineage(
self, list_of_genome_id, genome_id_to_taxid, strain_id_to_genome_id, genome_id_to_strain_id):
"""
Returnes the lineage for each genome id, assigning new strain id if not available
@param list_of_genome_id: List of identifier of genomes
@type list_of_genome_id: list[str|unicode]
@param genome_id_to_taxid: Assigned taxid for each genome id
@type genome_id_to_taxid: dict[str|unicode, str|unicode]
@param strain_id_to_genome_id: Mapping from strain id to genome id
@type strain_id_to_genome_id: dict[str|unicode, str|unicode]
@param genome_id_to_strain_id: Mapping from genome id to strain id
@type genome_id_to_strain_id: dict[str|unicode, str|unicode]
@return: lineage for each genome id using genome id as key
@rtype: dict[str|unicode, list[None|str|unicode]]
"""
strains_by_taxid = {}
genome_id_to_lineage = {}
for genome_id in list_of_genome_id:
tax_id = genome_id_to_taxid[genome_id]
if tax_id == "":
raise KeyError("genome_ID '{}' has no taxid!".format(genome_id))
tax_id = self._taxonomy.get_updated_taxid(tax_id)
genome_id_to_lineage[genome_id] = self._taxonomy.get_lineage_of_legal_ranks(
tax_id, ranks=self._ranks, default_value=None)
if genome_id_to_lineage[genome_id][-1] is not None:
continue
if tax_id not in strains_by_taxid:
strains_by_taxid[tax_id] = 0
strains_by_taxid[tax_id] += 1
if genome_id in genome_id_to_strain_id and genome_id_to_strain_id[genome_id]:
strain_id = genome_id_to_strain_id[genome_id]
else:
strain_id = "{}.{}".format(tax_id, strains_by_taxid[tax_id])
# make sure assigned strain ids are unique, in case of previous assigned ids
while strain_id in genome_id_to_strain_id.values():
strains_by_taxid[tax_id] += 1
strain_id = "{}.{}".format(tax_id, strains_by_taxid[tax_id])
genome_id_to_strain_id[genome_id] = strain_id
genome_id_to_lineage[genome_id][-1] = strain_id
strain_id_to_genome_id[strain_id] = genome_id
return genome_id_to_lineage
def _get_percent_by_rank_by_taxid(self, genome_id_to_lineage, genome_id_to_percent):
"""
Return the percentage for each taxid of a list of default ranks
@param genome_id_to_lineage: Mapping from genome id to a lineage (list)
@type genome_id_to_lineage: dict[str|unicode, list[None|str|unicode]]
@param genome_id_to_percent: Mapping from genome id to percentage
@type genome_id_to_percent: dict[str|unicode, float]
@return: Percentage for each taxid of a list of default ranks as dictionary of dictionaries
@rtype: dict[str|unicode, dict[str|unicode, float]]
"""
percent_by_rank_by_taxid = {}
for rank in self._ranks:
percent_by_rank_by_taxid[rank] = dict()
for rank_index, rank in enumerate(self._ranks):
# rank = ranks[rank_index]
for genome_id in genome_id_to_lineage:
tax_id = genome_id_to_lineage[genome_id][rank_index]
if tax_id is None:
continue
percent = genome_id_to_percent[genome_id]
if tax_id not in percent_by_rank_by_taxid[rank]:
percent_by_rank_by_taxid[rank][tax_id] = 0
percent_by_rank_by_taxid[rank][tax_id] += percent
return percent_by_rank_by_taxid
def _stream_tp_rows(self, stream_output, percent_by_rank_by_taxid, strain_id_to_genome_id, genome_id_to_otu):
"""
Stream the rows of the taxonomic profile.
@param stream_output: Output of taxonomic profile
@type stream_output: file | FileIO | StringIO
@param percent_by_rank_by_taxid: Percentage for each taxid of a list of default ranks as dictionary of dictionaries
@type percent_by_rank_by_taxid: dict[str|unicode, dict[str|unicode, float]]
@param strain_id_to_genome_id: Map from strain id to a genome identifier
@type strain_id_to_genome_id: dict[str|unicode, str|unicode]
@param genome_id_to_otu: Map from genome id to an otu identifier
@type genome_id_to_otu: dict[str|unicode, str|unicode]
"""
row_format = "{taxid}\t{rank}\t{taxpath}\t{taxpath_sn}\t{abp:.4f}\t{gid}\t{otu}\n"
for rank_index, rank in enumerate(self._ranks):
for tax_id in percent_by_rank_by_taxid[rank]:
if tax_id == '':
self._logger.warning("Missing rank %s for a genome" % rank)
continue
if '.' in tax_id:
genome_id = strain_id_to_genome_id[tax_id]
otu = genome_id_to_otu[genome_id]
lineage = self._taxonomy.get_lineage_of_legal_ranks(tax_id.split('.')[0], ranks=self._ranks, default_value="")
lineage[-1] = tax_id
else:
genome_id = ""
otu = ""
lineage = self._taxonomy.get_lineage_of_legal_ranks(tax_id, ranks=self._ranks, default_value="")
lineage = lineage[:rank_index+1]
lineage_sn = [self._taxonomy.get_scientific_name(tid) if tid != "" and '.' not in tid else "" for tid in lineage]
if '.' in tax_id:
lineage_sn[-1] = self._taxonomy.get_scientific_name(tax_id.split('.')[0]) + " strain" # ""
if percent_by_rank_by_taxid[rank][tax_id] != 0:
stream_output.write(row_format.format(
taxid=tax_id,
rank=rank,
taxpath="|".join(lineage),
taxpath_sn="|".join(lineage_sn),
abp=percent_by_rank_by_taxid[rank][tax_id]*100,
gid=genome_id,
otu=otu
))
def _stream_tp_header(self, output_stream, identifier):
"""
Stream the header of the taxonomic profile.
@param output_stream: Output of taxonomic profile
@type output_stream: file | FileIO | StringIO
@param identifier: Identifier of a sample
@type identifier: str | unicode
"""
output_stream.write("@SampleID:{}\n".format(identifier))
output_stream.write("@Version:{}\n".format(self._taxonomic_profile_version))
output_stream.write("@Ranks:{ranks}\n\n".format(ranks="|".join(self._ranks)))
output_stream.write("@@TAXID\tRANK\tTAXPATH\tTAXPATHSN\tPERCENTAGE\t_CAMI_genomeID\t_CAMI_OTU\n")
| 49.359848 | 130 | 0.659197 | [
"Apache-2.0"
] | alienzj/CAMISIM | scripts/ComunityDesign/taxonomicprofile.py | 13,031 | Python |
"""
Support for interface with a Bose Soundtouch.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.soundtouch/
"""
import logging
import re
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK,
SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_STEP,
SUPPORT_VOLUME_SET, SUPPORT_TURN_ON, SUPPORT_PLAY, MediaPlayerDevice,
PLATFORM_SCHEMA)
from homeassistant.const import (CONF_HOST, CONF_NAME, STATE_OFF, CONF_PORT,
STATE_PAUSED, STATE_PLAYING,
STATE_UNAVAILABLE)
REQUIREMENTS = ['libsoundtouch==0.7.2']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'media_player'
SERVICE_PLAY_EVERYWHERE = 'soundtouch_play_everywhere'
SERVICE_CREATE_ZONE = 'soundtouch_create_zone'
SERVICE_ADD_ZONE_SLAVE = 'soundtouch_add_zone_slave'
SERVICE_REMOVE_ZONE_SLAVE = 'soundtouch_remove_zone_slave'
MAP_STATUS = {
"PLAY_STATE": STATE_PLAYING,
"BUFFERING_STATE": STATE_PLAYING,
"PAUSE_STATE": STATE_PAUSED,
"STOP_STATE": STATE_OFF
}
DATA_SOUNDTOUCH = "soundtouch"
SOUNDTOUCH_PLAY_EVERYWHERE = vol.Schema({
vol.Required('master'): cv.entity_id
})
SOUNDTOUCH_CREATE_ZONE_SCHEMA = vol.Schema({
vol.Required('master'): cv.entity_id,
vol.Required('slaves'): cv.entity_ids
})
SOUNDTOUCH_ADD_ZONE_SCHEMA = vol.Schema({
vol.Required('master'): cv.entity_id,
vol.Required('slaves'): cv.entity_ids
})
SOUNDTOUCH_REMOVE_ZONE_SCHEMA = vol.Schema({
vol.Required('master'): cv.entity_id,
vol.Required('slaves'): cv.entity_ids
})
DEFAULT_NAME = 'Bose Soundtouch'
DEFAULT_PORT = 8090
SUPPORT_SOUNDTOUCH = SUPPORT_PAUSE | SUPPORT_VOLUME_STEP | \
SUPPORT_VOLUME_MUTE | SUPPORT_PREVIOUS_TRACK | \
SUPPORT_NEXT_TRACK | SUPPORT_TURN_OFF | \
SUPPORT_VOLUME_SET | SUPPORT_TURN_ON | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Bose Soundtouch platform."""
if DATA_SOUNDTOUCH not in hass.data:
hass.data[DATA_SOUNDTOUCH] = []
if discovery_info:
host = discovery_info['host']
port = int(discovery_info['port'])
# if device already exists by config
if host in [device.config['host'] for device in
hass.data[DATA_SOUNDTOUCH]]:
return
remote_config = {
'id': 'ha.component.soundtouch',
'host': host,
'port': port
}
soundtouch_device = SoundTouchDevice(None, remote_config)
hass.data[DATA_SOUNDTOUCH].append(soundtouch_device)
add_devices([soundtouch_device])
else:
name = config.get(CONF_NAME)
remote_config = {
'id': 'ha.component.soundtouch',
'port': config.get(CONF_PORT),
'host': config.get(CONF_HOST)
}
soundtouch_device = SoundTouchDevice(name, remote_config)
hass.data[DATA_SOUNDTOUCH].append(soundtouch_device)
add_devices([soundtouch_device])
def service_handle(service):
"""Handle the applying of a service."""
master_device_id = service.data.get('master')
slaves_ids = service.data.get('slaves')
slaves = []
if slaves_ids:
slaves = [device for device in hass.data[DATA_SOUNDTOUCH] if
device.entity_id in slaves_ids]
master = next([device for device in hass.data[DATA_SOUNDTOUCH] if
device.entity_id == master_device_id].__iter__(), None)
if master is None:
_LOGGER.warning("Unable to find master with entity_id: %s",
str(master_device_id))
return
if service.service == SERVICE_PLAY_EVERYWHERE:
slaves = [d for d in hass.data[DATA_SOUNDTOUCH] if
d.entity_id != master_device_id]
master.create_zone(slaves)
elif service.service == SERVICE_CREATE_ZONE:
master.create_zone(slaves)
elif service.service == SERVICE_REMOVE_ZONE_SLAVE:
master.remove_zone_slave(slaves)
elif service.service == SERVICE_ADD_ZONE_SLAVE:
master.add_zone_slave(slaves)
hass.services.register(DOMAIN, SERVICE_PLAY_EVERYWHERE,
service_handle,
schema=SOUNDTOUCH_PLAY_EVERYWHERE)
hass.services.register(DOMAIN, SERVICE_CREATE_ZONE,
service_handle,
schema=SOUNDTOUCH_CREATE_ZONE_SCHEMA)
hass.services.register(DOMAIN, SERVICE_REMOVE_ZONE_SLAVE,
service_handle,
schema=SOUNDTOUCH_REMOVE_ZONE_SCHEMA)
hass.services.register(DOMAIN, SERVICE_ADD_ZONE_SLAVE,
service_handle,
schema=SOUNDTOUCH_ADD_ZONE_SCHEMA)
class SoundTouchDevice(MediaPlayerDevice):
"""Representation of a SoundTouch Bose device."""
def __init__(self, name, config):
"""Create Soundtouch Entity."""
from libsoundtouch import soundtouch_device
self._device = soundtouch_device(config['host'], config['port'])
if name is None:
self._name = self._device.config.name
else:
self._name = name
self._status = self._device.status()
self._volume = self._device.volume()
self._config = config
@property
def config(self):
"""Return specific soundtouch configuration."""
return self._config
@property
def device(self):
"""Return Soundtouch device."""
return self._device
def update(self):
"""Retrieve the latest data."""
self._status = self._device.status()
self._volume = self._device.volume()
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume.actual / 100
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._status.source == 'STANDBY':
return STATE_OFF
return MAP_STATUS.get(self._status.play_status, STATE_UNAVAILABLE)
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._volume.muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_SOUNDTOUCH
def turn_off(self):
"""Turn off media player."""
self._device.power_off()
self._status = self._device.status()
def turn_on(self):
"""Turn on media player."""
self._device.power_on()
self._status = self._device.status()
def volume_up(self):
"""Volume up the media player."""
self._device.volume_up()
self._volume = self._device.volume()
def volume_down(self):
"""Volume down media player."""
self._device.volume_down()
self._volume = self._device.volume()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._device.set_volume(int(volume * 100))
self._volume = self._device.volume()
def mute_volume(self, mute):
"""Send mute command."""
self._device.mute()
self._volume = self._device.volume()
def media_play_pause(self):
"""Simulate play pause media player."""
self._device.play_pause()
self._status = self._device.status()
def media_play(self):
"""Send play command."""
self._device.play()
self._status = self._device.status()
def media_pause(self):
"""Send media pause command to media player."""
self._device.pause()
self._status = self._device.status()
def media_next_track(self):
"""Send next track command."""
self._device.next_track()
self._status = self._device.status()
def media_previous_track(self):
"""Send the previous track command."""
self._device.previous_track()
self._status = self._device.status()
@property
def media_image_url(self):
"""Image url of current playing media."""
return self._status.image
@property
def media_title(self):
"""Title of current playing media."""
if self._status.station_name is not None:
return self._status.station_name
elif self._status.artist is not None:
return self._status.artist + " - " + self._status.track
return None
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._status.duration
@property
def media_artist(self):
"""Artist of current playing media."""
return self._status.artist
@property
def media_track(self):
"""Artist of current playing media."""
return self._status.track
@property
def media_album_name(self):
"""Album name of current playing media."""
return self._status.album
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
_LOGGER.debug("Starting media with media_id: " + str(media_id))
if re.match(r'http://', str(media_id)):
# URL
_LOGGER.debug("Playing URL %s", str(media_id))
self._device.play_url(str(media_id))
else:
# Preset
presets = self._device.presets()
preset = next([preset for preset in presets if
preset.preset_id == str(media_id)].__iter__(), None)
if preset is not None:
_LOGGER.debug("Playing preset: " + preset.name)
self._device.select_preset(preset)
else:
_LOGGER.warning(
"Unable to find preset with id " + str(media_id))
def create_zone(self, slaves):
"""
Create a zone (multi-room) and play on selected devices.
:param slaves: slaves on which to play
"""
if not slaves:
_LOGGER.warning("Unable to create zone without slaves")
else:
_LOGGER.info(
"Creating zone with master " + str(self.device.config.name))
self.device.create_zone([slave.device for slave in slaves])
def remove_zone_slave(self, slaves):
"""
        Remove slave(s) from an existing zone (multi-room).
        Zone must already exist and the slaves list cannot be empty.
Note: If removing last slave, the zone will be deleted and you'll have
to create a new one. You will not be able to add a new slave anymore
:param slaves: slaves to remove from the zone
"""
if not slaves:
_LOGGER.warning("Unable to find slaves to remove")
else:
_LOGGER.info("Removing slaves from zone with master " +
str(self.device.config.name))
self.device.remove_zone_slave([slave.device for slave in slaves])
def add_zone_slave(self, slaves):
"""
        Add slave(s) to an existing zone (multi-room).
        Zone must already exist and the slaves list cannot be empty.
        :param slaves: slaves to add
"""
if not slaves:
_LOGGER.warning("Unable to find slaves to add")
else:
_LOGGER.info(
"Adding slaves to zone with master " + str(
self.device.config.name))
self.device.add_zone_slave([slave.device for slave in slaves])
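# Illustrative service-call sketch (not part of the original platform; the entity ids are
# placeholders). The multi-room services registered in setup_platform take a master and,
# except for "play everywhere", a list of slaves:
#
#   hass.services.call(DOMAIN, SERVICE_CREATE_ZONE, {
#       "master": "media_player.soundtouch_living_room",
#       "slaves": ["media_player.soundtouch_kitchen"],
#   })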
| 33.035616 | 79 | 0.625643 | [
"Apache-2.0"
] | Anthonymcqueen21/home-assistant | homeassistant/components/media_player/soundtouch.py | 12,058 | Python |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
# The MIT License
# Copyright (c) 2017 - 2021 Tammo Ippen, [email protected]
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from plotille import Canvas
# The underlying canvas-implementation can be used on its own.
def main():
c = Canvas(width=40, height=20)
c.rect(0.1, 0.1, 0.6, 0.6)
c.line(0.1, 0.1, 0.6, 0.6)
c.line(0.1, 0.6, 0.6, 0.1)
c.line(0.1, 0.6, 0.35, 0.8)
c.line(0.35, 0.8, 0.6, 0.6)
c.text(0.3, 0.5, 'hi', color='red')
c.point(0.35, 0.35, color='blue')
c.fill_char(0.35, 0.1)
print(c.plot())
if __name__ == '__main__':
main()
| 37.152174 | 82 | 0.716793 | [
"MIT"
] | ntezak/plotille | examples/house_example.py | 1,709 | Python |
#!/usr/bin/env python
import random
import unittest
from kaldi.base.io import istringstream, ostringstream
from kaldi.cudamatrix import cuda_available, approx_equal_cu_matrix, CuMatrix
from kaldi.matrix import Matrix, Vector
from kaldi.matrix.functions import approx_equal
from kaldi.nnet3 import *
class TestNnetCompute(unittest.TestCase):
def test_nnet_compute(self):
gen_config = NnetGenerationOptions()
test_collapse_model = random.choice([True, False])
configs = generate_config_sequence(gen_config)
nnet = Nnet()
for j, config in enumerate(configs):
# print("Input config[{}]:".format(j))
# print(config)
istrm = istringstream.from_str(config)
nnet.read_config(istrm)
request = ComputationRequest()
inputs = compute_example_computation_request_simple(nnet, request)
if test_collapse_model:
set_batchnorm_test_mode(True, nnet)
set_dropout_test_mode(True, nnet)
compiler = Compiler(request, nnet)
opts = CompilerOptions()
computation = compiler.create_computation(opts)
nnet_collapsed = Nnet.from_other(nnet)
if test_collapse_model:
collapse_config = CollapseModelConfig()
collapse_model(collapse_config, nnet_collapsed)
compiler_collapsed = Compiler(request, nnet_collapsed)
computation_collapsed = compiler_collapsed.create_computation(opts)
computation_collapsed.compute_cuda_indexes()
ostrm = ostringstream()
computation.print_computation(ostrm, nnet)
# print("Generated computation:")
# print(ostrm.to_str())
check_config = CheckComputationOptions()
check_config.check_rewrite = True
checker = ComputationChecker(check_config, nnet, computation)
checker.check()
if random.choice([True, False]):
opt_config = NnetOptimizeOptions()
optimize(opt_config, nnet, max_output_time_in_request(request),
computation)
ostrm = ostringstream()
computation.print_computation(ostrm, nnet)
# print("Optimized computation:")
# print(ostrm.to_str())
compute_opts = NnetComputeOptions()
compute_opts.debug = random.choice([True, False])
computation.compute_cuda_indexes()
computer = NnetComputer(compute_opts, computation, nnet, nnet)
for i, ispec in enumerate(request.inputs):
temp = CuMatrix.from_matrix(inputs[i])
print("Input sum:", temp.sum())
computer.accept_input(ispec.name, temp)
computer.run()
output = computer.get_output_destructive("output")
print("Output sum:", output.sum())
if test_collapse_model:
computer_collapsed = NnetComputer(compute_opts,
computation_collapsed,
nnet_collapsed, nnet_collapsed)
for i, ispec in enumerate(request.inputs):
temp = CuMatrix.from_matrix(inputs[i])
computer_collapsed.accept_input(ispec.name, temp)
computer_collapsed.run()
output_collapsed = computer_collapsed.get_output_destructive("output")
print("Output sum [collapsed]:", output_collapsed.sum())
self.assertTrue(approx_equal_cu_matrix(output, output_collapsed),
"Regular and collapsed computation outputs differ.")
output_deriv = CuMatrix.from_size(output.num_rows(), output.num_cols())
output_deriv.set_randn()
if request.outputs[0].has_deriv:
computer.accept_input("output", output_deriv)
computer.run()
for i, ispec in enumerate(request.inputs):
if ispec.has_deriv:
in_deriv = computer.get_output_destructive(ispec.name)
print("Input-deriv sum for input {} is:".format(ispec.name),
in_deriv.sum())
def test_nnet_decodable(self):
gen_config = NnetGenerationOptions()
configs = generate_config_sequence(gen_config)
nnet = Nnet()
for j, config in enumerate(configs):
# print("Input config[{}]:".format(j))
# print(config)
istrm = istringstream.from_str(config)
nnet.read_config(istrm)
num_frames = 5 + random.randint(1, 100)
input_dim = nnet.input_dim("input")
output_dim = nnet.output_dim("output")
ivector_dim = max(0, nnet.input_dim("ivector"))
input = Matrix(num_frames, input_dim)
set_batchnorm_test_mode(True, nnet)
set_dropout_test_mode(True, nnet)
input.set_randn_()
ivector = Vector(ivector_dim)
ivector.set_randn_()
priors = Vector(output_dim if random.choice([True, False]) else 0)
if len(priors) != 0:
priors.set_randn_()
priors.apply_exp_()
output1 = Matrix(num_frames, output_dim)
output2 = Matrix(num_frames, output_dim)
opts = NnetSimpleComputationOptions()
opts.frames_per_chunk = random.randint(5, 25)
compiler = CachingOptimizingCompiler(nnet)
decodable = DecodableNnetSimple(opts, nnet, priors, input, compiler,
ivector if ivector_dim else None)
for t in range(num_frames):
decodable.get_output_for_frame(t, output1[t])
opts = NnetSimpleLoopedComputationOptions()
info = DecodableNnetSimpleLoopedInfo.from_priors(opts, priors, nnet)
decodable = DecodableNnetSimpleLooped(info, input,
ivector if ivector_dim else None)
for t in range(num_frames):
decodable.get_output_for_frame(t, output2[t])
if (not nnet_is_recurrent(nnet)
and nnet.info().find("statistics-extraction") == -1
and nnet.info().find("TimeHeightConvolutionComponent") == -1
and nnet.info().find("RestrictedAttentionComponent") == -1):
for t in range(num_frames):
self.assertTrue(approx_equal(output1[t], output2[t]))
if __name__ == '__main__':
for i in range(2):
if cuda_available():
from kaldi.cudamatrix import CuDevice
CuDevice.instantiate().set_debug_stride_mode(True)
if i == 0:
CuDevice.instantiate().select_gpu_id("no")
else:
CuDevice.instantiate().select_gpu_id("yes")
unittest.main(exit=False)
| 40.375758 | 82 | 0.624287 | [
"Apache-2.0"
] | Alienmaster/pykaldi | tests/nnet3/nnet-compute-test.py | 6,662 | Python |
# this is here to avoid a circular import
from collections import namedtuple
class Point(namedtuple("Point", ["x", "y", "group", "fid"])):
@property
def __geo_interface__(self):
return {"type": "Point", "coordinates": (self.x, self.y)}
def as_feature(self):
geometry = self.__geo_interface__
properties = {"group": self.group, "fid": self.fid}
return {"type": "Feature", "properties": properties, "geometry": geometry}
| 33.285714 | 82 | 0.641631 | [
"Apache-2.0"
] | eyeseast/dorchester | dorchester/point.py | 466 | Python |
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class AddressTokensTransactionUnconfirmedOmnilayertoken(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'name': (str,), # noqa: E501
'property_id': (str,), # noqa: E501
'transaction_type': (str,), # noqa: E501
'created_by_transaction_id': (str,), # noqa: E501
'amount': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
'property_id': 'propertyId', # noqa: E501
'transaction_type': 'transactionType', # noqa: E501
'created_by_transaction_id': 'createdByTransactionId', # noqa: E501
'amount': 'amount', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, name, property_id, transaction_type, created_by_transaction_id, amount, *args, **kwargs): # noqa: E501
"""AddressTokensTransactionUnconfirmedOmnilayertoken - a model defined in OpenAPI
Args:
name (str): Specifies the name of the token.
property_id (str): Defines the ID of the property for Omni Layer.
transaction_type (str): Defines the type of the transaction made.
created_by_transaction_id (str): The transaction ID used to create the token.
amount (str): Defines the amount of tokens sent with the transaction that is pending confirmation.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
self.property_id = property_id
self.transaction_type = transaction_type
self.created_by_transaction_id = created_by_transaction_id
self.amount = amount
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
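# Illustrative instantiation sketch (not part of the generated file; all field values
# below are placeholders rather than real Omni Layer data):
#
#   token = AddressTokensTransactionUnconfirmedOmnilayertoken(
#       name="Omni Token",
#       property_id="31",
#       transaction_type="simple send",
#       created_by_transaction_id="b0a1...",
#       amount="100.0")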
| 43.085561 | 484 | 0.60767 | [
"MIT"
] | dkremer-ledger/Crypto_APIs_2.0_SDK_Python | cryptoapis/model/address_tokens_transaction_unconfirmed_omnilayertoken.py | 8,057 | Python |
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class TestReport_TeardownSchema:
"""
A summary of information based on the results of executing a TestScript.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
A summary of information based on the results of executing a TestScript.
id: unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
action: The teardown action will only contain an operation.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.testreport_action2 import (
TestReport_Action2Schema,
)
if (
max_recursion_limit
and nesting_list.count("TestReport_Teardown") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["TestReport_Teardown"]
schema = StructType(
[
# unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The teardown action will only contain an operation.
StructField(
"action",
ArrayType(
TestReport_Action2Schema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
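# --- Hedged usage sketch (not part of the auto-generated module above) ---
# Minimal example of materialising the schema defined by this class; it assumes
# pyspark and the spark_fhir_schemas package are installed and importable.
if __name__ == "__main__":
    example_schema = TestReport_TeardownSchema.get_schema()
    # With include_extension left at its default of False, the "extension"
    # field is flattened to a plain string column by the block above.
    print(example_schema.simpleString())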
| 42.642276 | 96 | 0.563203 | [
"Apache-2.0"
] | icanbwell/SparkFhirSchemas | spark_fhir_schemas/stu3/complex_types/testreport_teardown.py | 5,245 | Python |
from tracking.harvest import save_dfes_avl
from django.core.management.base import BaseCommand
import logging
LOGGER = logging.getLogger('tracking_points')
class Command(BaseCommand):
    help = "Harvests the DFES feed to ingest tracking points"
def handle(self, *args, **options):
LOGGER.info('Harvesting DFES feed')
try:
print("Harvested {} from DFES; created {}, updated {}, ingored {}; Earliest seen {}, Lastest seen {}.".format(*save_dfes_avl()))
#LOGGER.info("Updated {} of {} scanned DFES devices".format(updated, num_records))
except Exception as e:
LOGGER.error(e)
| 32.05 | 140 | 0.673947 | [
"BSD-3-Clause"
] | fahmidaward/resource_tracking | tracking/management/commands/harvest_dfes_feed.py | 641 | Python |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from .amp_type import AMP_TYPE
from colossalai.context import Config
import torch.nn as nn
from torch.optim import Optimizer
from torch.nn.modules.loss import _Loss
from .torch_amp import convert_to_torch_amp
from .apex_amp import convert_to_apex_amp
from .naive_amp import convert_to_naive_amp
def convert_to_amp(model: nn.Module, optimizer: Optimizer, criterion: _Loss, mode: AMP_TYPE, amp_config: Config = None):
"""A helper function to wrap training components with Torch AMP modules.
Args:
param model (:class:`torch.nn.Module`): your model object.
optimizer (:class:`torch.optim.Optimizer`): your optimizer object.
criterion (:class:`torch.nn.modules.loss._Loss`): your loss function object.
mode (:class:`colossalai.amp.AMP_TYPE`): amp mode.
amp_config (Union[:class:`colossalai.context.Config`, dict]): configuration for different amp modes.
Returns:
A tuple (model, optimizer, criterion).
Note:
        ``amp_config`` varies with the mode you choose. You should check the corresponding amp mode
for more details about ``amp_config``.
For ``apex_amp``, please check
`apex_amp config <https://nvidia.github.io/apex/amp.html?highlight=apex%20amp>`_.
For ``naive_amp``, please check
`naive_amp config <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/amp/naive_amp/_fp16_optimizer.py#L42>`_.
For ``torch_amp``, please check
`torch_amp config <https://github.com/pytorch/pytorch/blob/master/torch/cuda/amp/grad_scaler.py#L97>`_.
"""
assert isinstance(mode, AMP_TYPE), \
f'expected the argument mode be AMP_TYPE, but got {type(mode)}'
if amp_config is None:
amp_config = Config()
if mode == AMP_TYPE.TORCH:
model, optimizer, criterion = convert_to_torch_amp(model, optimizer, criterion, amp_config)
elif mode == AMP_TYPE.APEX:
model, optimizer = convert_to_apex_amp(model, optimizer, amp_config)
elif mode == AMP_TYPE.NAIVE:
model, optimizer = convert_to_naive_amp(model, optimizer, amp_config)
return model, optimizer, criterion
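# --- Hedged usage sketch (not part of this module) ---
# Illustrates the call pattern documented above with stand-in components;
# AMP_TYPE.TORCH is just one of the supported modes and the tiny model,
# optimizer and criterion below are placeholders.
if __name__ == "__main__":
    import torch
    _model = nn.Linear(16, 4)
    _optimizer = torch.optim.SGD(_model.parameters(), lr=1e-3)
    _criterion = nn.CrossEntropyLoss()
    _model, _optimizer, _criterion = convert_to_amp(
        _model, _optimizer, _criterion, AMP_TYPE.TORCH)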
| 43.098039 | 128 | 0.708826 | [
"Apache-2.0"
] | Cautiousss/ColossalAI | colossalai/amp/__init__.py | 2,198 | Python |
from collections import defaultdict
import pandas as pd
import pickle
from sqlalchemy import create_engine, inspect, Table, Column
from sqlalchemy.engine.url import make_url
from sys import exit
class DatabaseClient:
""" Takes care of the database pass opening to find the url and can query
the respected database.
Input:
dbpass_path path to the text file with the list of database urls
dbname database name so we know which database to query from the list
"""
def __init__(self, dbpass_path, dbname):
self.dbpass_path = dbpass_path
self.dbname = dbname
self.db_url = self.get_db_url()
self.engine = create_engine(self.db_url)
def get_db_url(self):
with open(self.dbpass_path, 'r') as infile:
db_names = []
for raw_url in infile.read().splitlines():
url_obj = make_url(raw_url)
if url_obj.database == self.dbname:
infile.close()
return raw_url
db_names.append(url_obj.database)
infile.close()
exit('database name does not exist in dbpass given:' + ', '.join(db_names))
def get_df_with_query(self, query):
""" WARNING :: Will crash if too large. If so, you should just create the df file
first via create_df_file(query=).
load example:
with open(input, 'rb') as infile:
objs = []
while True:
try:
obj = pickle.load(infile)
except EOFError:
break
...
"""
return pd.read_sql(query, self.engine)
def create_df_file_with_query(self, query, output):
""" Dumps in df in chunks to avoid crashes.
"""
chunk_size = 100000
offset = 0
data = defaultdict(lambda : defaultdict(list))
with open(output, 'wb') as outfile:
query = query.replace(';', '')
query += """ LIMIT {chunk_size} OFFSET {offset};"""
while True:
print(offset)
query = query.format(
chunk_size=chunk_size,
offset=offset
)
df = pd.read_sql(query, self.engine)
pickle.dump(df, outfile)
offset += chunk_size
if len(df) < chunk_size:
break
outfile.close()
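# --- Hedged usage sketch (not part of the original module) ---
# Shows one way this client might be used; the dbpass path, database name,
# query and output file below are placeholders. The chunked read mirrors the
# load example given in the get_df_with_query docstring.
if __name__ == "__main__":
    client = DatabaseClient(dbpass_path='~/.dbpass', dbname='example_db')
    client.create_df_file_with_query('SELECT * FROM terms', 'terms.pickle')
    frames = []
    with open('terms.pickle', 'rb') as infile:
        while True:
            try:
                frames.append(pickle.load(infile))
            except EOFError:
                break
    df = pd.concat(frames, ignore_index=True)
    print(df.shape)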
| 33.666667 | 90 | 0.540594 | [
"MIT"
] | MCSZ/pyontutils | ilxutils/ilxutils/database_client.py | 2,525 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnSitesConfigurationOperations:
"""VpnSitesConfigurationOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _download_initial(
self,
resource_group_name: str,
virtual_wan_name: str,
request: "_models.GetVpnSitesConfigurationRequest",
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._download_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(request, 'GetVpnSitesConfigurationRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_download_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnConfiguration'} # type: ignore
async def begin_download(
self,
resource_group_name: str,
virtual_wan_name: str,
request: "_models.GetVpnSitesConfigurationRequest",
**kwargs
) -> AsyncLROPoller[None]:
"""Gives the sas-url to download the configurations for vpn-sites in a resource group.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWAN for which configuration of all vpn-sites is
needed.
:type virtual_wan_name: str
:param request: Parameters supplied to download vpn-sites configuration.
:type request: ~azure.mgmt.network.v2020_06_01.models.GetVpnSitesConfigurationRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._download_initial(
resource_group_name=resource_group_name,
virtual_wan_name=virtual_wan_name,
request=request,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_download.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnConfiguration'} # type: ignore
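# --- Hedged usage sketch (not part of the generated SDK code) ---
# Illustrates how this long-running operation is typically driven from an async
# NetworkManagementClient; the subscription id, resource group, virtual WAN
# name, site resource id and SAS URL are placeholders, and the request field
# names should be checked against GetVpnSitesConfigurationRequest in the models
# package before use.
#
#   import asyncio
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.network.aio import NetworkManagementClient
#   from azure.mgmt.network.v2020_06_01.models import GetVpnSitesConfigurationRequest
#
#   async def download_vpn_sites_configuration():
#       async with DefaultAzureCredential() as credential:
#           async with NetworkManagementClient(credential, "<subscription-id>") as client:
#               request = GetVpnSitesConfigurationRequest(
#                   vpn_sites=["<vpn-site-resource-id>"],           # assumed field name
#                   output_blob_sas_url="<writable-blob-sas-url>",  # assumed field name
#               )
#               poller = await client.vpn_sites_configuration.begin_download(
#                   "<resource-group>", "<virtual-wan-name>", request)
#               await poller.result()
#
#   asyncio.run(download_vpn_sites_configuration())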
| 50.490798 | 200 | 0.685541 | [
"MIT"
] | AriZavala2/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_06_01/aio/operations/_vpn_sites_configuration_operations.py | 8,230 | Python |
def testaArq(arq):
    """
    -> Checks whether the file arq exists.
    :arq: Name of the file to be tested.
    :return: returns True if the file is found,
             otherwise False
    """
    try:
        a = open(arq)
    except FileNotFoundError:  # The file was not found
        print('File not found!')
        return False
    else:
        a.close()
        return True
def criaArq(arq=''):
    """
    -> Creates a text file if it does not already exist.
    :param arq: Name of the file.
    :return:
    """
    try:
        a = open(arq, 'xt')
    except FileExistsError:
        print(f'ERROR: the file \"{arq}\" already exists!')
    else:
        print(f'The file \"{arq}\" was created successfully!')
        a.close()  # only close when the file was actually opened
    return
def leArq(arq=''):
    """
    -> Opens a text file and shows its items.
    :param arq: Name of the file.
    :return:
    """
    return
def editaArq(arq):
    """
    -> Opens a text file and appends a new item at
    the end of the file.
    :param arq: Name of the file.
    :return:
    """
    return
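# --- Hedged usage sketch (not part of the original module) ---
# Small demonstration of the helpers above; 'courses.txt' is a placeholder name.
if __name__ == '__main__':
    if not testaArq('courses.txt'):
        criaArq('courses.txt')
    leArq('courses.txt')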
| 21.333333 | 61 | 0.560662 | [
"MIT"
] | EduardoPessanha/Python | bibli/arquivo/__init__.py | 1,093 | Python |
# -*- coding: utf-8 -*-
__author__ = 'abbot'
from selenium import webdriver
from selenium.webdriver import ActionChains
driver = webdriver.PhantomJS(executable_path='/Users/wangbo/Downloads/phantomjs-2.1.1-macosx/bin/phantomjs')
# Locate the target element ('element' is a placeholder XPath expression).
ac = driver.find_element_by_xpath('element')
# Hover over the element.
ActionChains(driver).move_to_element(ac).perform()
# Hover over the element, then click it.
ActionChains(driver).move_to_element(ac).click(ac).perform()
| 26.133333 | 108 | 0.783163 | [
"MIT"
] | fuandenghuo/100-days-of-python | selenium_test/action.py | 392 | Python |
a = input('Type something: ')
print(type(a))
print(a.isnumeric())
print(a.capitalize())
print(a.isalnum())
print(a.isdecimal())
print(a.islower())
print(a.upper()) | 20 | 26 | 0.68125 | [
"MIT"
] | legna7/Python | Mundo 1/Ex04.py | 160 | Python |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A runnable program to evaluate video embeddings.
Given a model checkpoint, and the location of the shards for a dataset,
computes the performance of the Brave video embeddings. This code
may be used to evaluate both UCF101 and HMDB51, as long as they are both
given in the appropriate input format. The only hyperparameter to this program
is the svm_regularization constant, which can impact the performance of the
linear classification.
"""
import glob
import json
from absl import app
from absl import flags
import chex
import jax
import numpy as np
import tensorflow as tf
from brave.datasets import datasets
from brave.evaluate import evaluate_video_embedding
from brave.models.brave import brave
FLAGS = flags.FLAGS
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint to evaluate.')
flags.DEFINE_integer('batch_size', None, 'The size of the batches to use.')
# Hyperparameters
flags.DEFINE_float('svm_regularization', None, 'Regularization constant.')
# Datasets
flags.DEFINE_string('train_dataset_shards', None,
'Glob pattern for train shards.')
flags.DEFINE_string('test_dataset_shards', None,
'Glob pattern for test shards.')
# Transformations to apply to video before running network.
flags.DEFINE_integer('num_video_frames', 32, 'Number of frames in eval videos.')
flags.DEFINE_integer('video_step', 2, 'The step to use in the eval videos.')
flags.DEFINE_integer('image_size', 224, 'The size of the video to evaluate.')
def main(_):
checkpoint_path = FLAGS.checkpoint_path
train_shards = glob.glob(FLAGS.train_dataset_shards)
test_shards = glob.glob(FLAGS.test_dataset_shards)
video_config = evaluate_video_embedding.VideoConfig(
num_frames=FLAGS.num_video_frames,
image_size=FLAGS.image_size,
video_step=FLAGS.video_step,
)
video_embedding_fn = _video_embedding(checkpoint_path)
results = evaluate_video_embedding.evaluate_video_embedding(
train_dataset_shards=train_shards,
test_dataset_shards=test_shards,
embedding_fn=video_embedding_fn,
config=video_config,
svm_regularization=FLAGS.svm_regularization,
batch_size=FLAGS.batch_size)
results_dct = dict(
top_1_train=results.train.top_one_accuracy,
top_5_train=results.train.top_five_accuracy,
top_1_test=results.test.top_one_accuracy,
top_5_test=results.test.top_five_accuracy,
)
# Write the results to stdout in a way that can be used as input to other
# programs.
print(json.dumps(results_dct))
def _video_embedding(checkpoint_path: str):
"""Load the video embedding for the BraVe model to evaluate."""
checkpoint = np.load(checkpoint_path, allow_pickle=True).item()
params = checkpoint['params']
state = checkpoint['state']
brave_config_dct = checkpoint['config']
brave_config = brave.BraveConfig(**brave_config_dct)
model = brave.get_model(brave_config)
@jax.jit
def embedding_fn(view: datasets.View) -> chex.Array:
narrow_forward_fn = model.forward_fns['narrow_video']
embedding, _ = narrow_forward_fn(params, state, None, view, False)
return embedding
def synchronous_embedding_fn(view: datasets.View) -> chex.Array:
# jax.jit causes the above function to be executed lazily, but we want
# to force the computation to happen synchronously.
return jax.device_get(embedding_fn(view))
return synchronous_embedding_fn
if __name__ == '__main__':
try:
tf.config.set_visible_devices([], 'GPU') # Prevent TF from using the GPU.
except tf.errors.NotFoundError:
pass
flags.mark_flag_as_required('checkpoint_path')
flags.mark_flag_as_required('batch_size')
flags.mark_flag_as_required('train_dataset_shards')
flags.mark_flag_as_required('test_dataset_shards')
flags.mark_flag_as_required('svm_regularization')
app.run(main)
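# --- Hedged usage note (not part of the original script) ---
# A typical invocation might look like the following; the checkpoint path,
# shard globs and hyperparameter values below are placeholders:
#
#   python -m brave.evaluate_video_embeddings \
#     --checkpoint_path=/path/to/brave_checkpoint.npy \
#     --train_dataset_shards='/data/ucf101/train-*.tfrecord' \
#     --test_dataset_shards='/data/ucf101/test-*.tfrecord' \
#     --batch_size=8 \
#     --svm_regularization=0.0001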
| 34.381679 | 80 | 0.75222 | [
"Apache-2.0"
] | deepmind/brave | brave/evaluate_video_embeddings.py | 4,504 | Python |
"""API v2 tests."""
from django.urls import reverse
from modoboa.lib.tests import ModoAPITestCase
class TransportViewSetTestCase(ModoAPITestCase):
def test_list(self):
url = reverse("v2:transport-list")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
backends = resp.json()
self.assertEqual(len(backends), 1)
self.assertEqual(backends[0]["name"], "relay")
| 25.470588 | 54 | 0.672055 | [
"ISC"
] | suryatmodulus/modoboa | modoboa/transport/api/v2/tests.py | 433 | Python |
import logging
import player
import telnet
logger = logging.getLogger(__name__)
########################################################################
class TrainingHandler(telnet.MudTelnetHandler):
####################################################################
def __init__(self, protocol, player):
super(TrainingHandler, self).__init__(protocol)
self.player = player
####################################################################
def handle(self, data):
if data == "quit":
player.player_database.save()
self.protocol.remove_handler()
return
if data in ["1", "2", "3"]:
if self.player.stat_points > 0:
self.player.stat_points -= 1
if data == "1":
self.player.attributes.BASE_STRENGTH += 1
elif data == "2":
self.player.attributes.BASE_HEALTH += 1
else:
self.player.attributes.BASE_AGILITY += 1
self.print_stats(True)
else:
logger.warn("unknown command: %s", data)
self.send("<reset><clearscreen><red>Unknown Command '%s'<newline>" % data)
self.print_stats(False)
####################################################################
def enter(self):
self.player.active = False
if self.player.newbie:
self.send(("<magenta><bold>Welcome to SimpleMUD, %s!\r\n" +
"You must train your character with your desired stats,\r\n" +
"before you enter the realm.\r\n\r\n") % self.player.name)
self.player.newbie = False
self.print_stats(False)
####################################################################
def hung_up(self):
logger.warn("%s - hung up in %s", self.protocol.get_remote_address(), self.__class__.__name__)
player.player_database.logout(self.player.id)
####################################################################
def flooded(self):
logger.warn("%s - flooded in %s", self.protocol.get_remote_address(), self.__class__.__name__)
player.player_database.logout(self.player.id)
####################################################################
def print_stats(self, clear_screen=True):
message = []
if clear_screen:
message.append("<clearscreen>")
message += ["<white><bold>"]
message.append("---------------------- Your Stats ----------------------\r\n")
message.append("<dim>")
message.append("Player: %s\r\n" % self.player.name)
message.append("Stat Points Left: %s\r\n" % self.player.stat_points)
message.append("1) Strength: %s\r\n" % self.player.attributes.STRENGTH)
message.append("2) Health: %s\r\n" % self.player.attributes.HEALTH)
message.append("3) Agility: %s\r\n" % self.player.attributes.AGILITY)
message.append("<bold>")
message.append("--------------------------------------------------------\r\n")
message.append("Enter 1, 2, or 3 to add a stat point, or \"quit\" to go back: ")
self.send("".join(message))
| 43.527027 | 102 | 0.473455 | [
"MIT"
] | tobynance/simple_mud | src/training_handler.py | 3,221 | Python |
class DataGridViewAutoSizeColumnMode(Enum,IComparable,IFormattable,IConvertible):
"""
Defines values for specifying how the width of a column is adjusted.
enum DataGridViewAutoSizeColumnMode,values: AllCells (6),AllCellsExceptHeader (4),ColumnHeader (2),DisplayedCells (10),DisplayedCellsExceptHeader (8),Fill (16),None (1),NotSet (0)
"""
def Instance(self):
""" This function has been arbitrarily put into the stubs"""
return DataGridViewAutoSizeColumnMode()
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
AllCells=None
AllCellsExceptHeader=None
ColumnHeader=None
DisplayedCells=None
DisplayedCellsExceptHeader=None
Fill=None
None_ =None
NotSet=None
value__=None
| 31.227273 | 215 | 0.703057 | [
"MIT"
] | tranconbv/ironpython-stubs | release/stubs.min/System/Windows/Forms/__init___parts/DataGridViewAutoSizeColumnMode.py | 1,374 | Python |
import pytest
from importtime_output_wrapper import Import
from importtime_output_wrapper import parse_import_time
from importtime_output_wrapper import InvalidInput
imp_a0 = Import(name="a0", t_self=4, t_cumu=5, depth=2, childs=[])
imp_a1 = Import(name="a1", t_self=3, t_cumu=4, depth=2, childs=[])
imp_b0 = Import(name="b0", t_self=4, t_cumu=5, depth=2, childs=[])
imp_b1 = Import(name="b1", t_self=3, t_cumu=4, depth=2, childs=[])
imp_b = Import(name="b", t_self=2, t_cumu=3, depth=1, childs=[imp_b0, imp_b1])
imp_a = Import(name="a", t_self=1, t_cumu=2, depth=1, childs=[imp_a0, imp_a1])
root = Import(name="root", t_self=0, t_cumu=0, depth=0, childs=[imp_a, imp_b])
test_tree = [root]
with open("tests/sample_importtime_output") as f:
test_output_string = f.read()
@pytest.mark.parametrize(("test_input", "expected"), ((test_output_string, test_tree),))
def test_parse_std_err(test_input, expected):
assert parse_import_time(test_input) == expected
def test_parse_empty_std_err():
with pytest.raises(InvalidInput):
parse_import_time("")
| 34.580645 | 88 | 0.731343 | [
"MIT"
] | Victor333Huesca/importtime-output-wrapper | tests/test_parse_import_time.py | 1,072 | Python |
import contextlib
import os
import sys
import tracemalloc
import unittest
from unittest.mock import patch
from test.support.script_helper import (assert_python_ok, assert_python_failure,
interpreter_requires_environment)
from test import support
try:
import threading
except ImportError:
threading = None
try:
import _testcapi
except ImportError:
_testcapi = None
EMPTY_STRING_SIZE = sys.getsizeof(b'')
def get_frames(nframe, lineno_delta):
frames = []
frame = sys._getframe(1)
for index in range(nframe):
code = frame.f_code
lineno = frame.f_lineno + lineno_delta
frames.append((code.co_filename, lineno))
lineno_delta = 0
frame = frame.f_back
if frame is None:
break
return tuple(frames)
def allocate_bytes(size):
nframe = tracemalloc.get_traceback_limit()
bytes_len = (size - EMPTY_STRING_SIZE)
frames = get_frames(nframe, 1)
data = b'x' * bytes_len
return data, tracemalloc.Traceback(frames)
def create_snapshots():
traceback_limit = 2
# _tracemalloc._get_traces() returns a list of (domain, size,
# traceback_frames) tuples. traceback_frames is a tuple of (filename,
# line_number) tuples.
raw_traces = [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(1, 2, (('a.py', 5), ('b.py', 4))),
(2, 66, (('b.py', 1),)),
(3, 7, (('<unknown>', 0),)),
]
snapshot = tracemalloc.Snapshot(raw_traces, traceback_limit)
raw_traces2 = [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(2, 2, (('a.py', 5), ('b.py', 4))),
(2, 5000, (('a.py', 5), ('b.py', 4))),
(4, 400, (('c.py', 578),)),
]
snapshot2 = tracemalloc.Snapshot(raw_traces2, traceback_limit)
return (snapshot, snapshot2)
def frame(filename, lineno):
return tracemalloc._Frame((filename, lineno))
def traceback(*frames):
return tracemalloc.Traceback(frames)
def traceback_lineno(filename, lineno):
return traceback((filename, lineno))
def traceback_filename(filename):
return traceback_lineno(filename, 0)
class TestTracemallocEnabled(unittest.TestCase):
def setUp(self):
if tracemalloc.is_tracing():
self.skipTest("tracemalloc must be stopped before the test")
tracemalloc.start(1)
def tearDown(self):
tracemalloc.stop()
def test_get_tracemalloc_memory(self):
data = [allocate_bytes(123) for count in range(1000)]
size = tracemalloc.get_tracemalloc_memory()
self.assertGreaterEqual(size, 0)
tracemalloc.clear_traces()
size2 = tracemalloc.get_tracemalloc_memory()
self.assertGreaterEqual(size2, 0)
self.assertLessEqual(size2, size)
def test_get_object_traceback(self):
tracemalloc.clear_traces()
obj_size = 12345
obj, obj_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj)
self.assertEqual(traceback, obj_traceback)
def test_set_traceback_limit(self):
obj_size = 10
tracemalloc.stop()
self.assertRaises(ValueError, tracemalloc.start, -1)
tracemalloc.stop()
tracemalloc.start(10)
obj2, obj2_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj2)
self.assertEqual(len(traceback), 10)
self.assertEqual(traceback, obj2_traceback)
tracemalloc.stop()
tracemalloc.start(1)
obj, obj_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj)
self.assertEqual(len(traceback), 1)
self.assertEqual(traceback, obj_traceback)
def find_trace(self, traces, traceback):
for trace in traces:
if trace[2] == traceback._frames:
return trace
self.fail("trace not found")
def test_get_traces(self):
tracemalloc.clear_traces()
obj_size = 12345
obj, obj_traceback = allocate_bytes(obj_size)
traces = tracemalloc._get_traces()
trace = self.find_trace(traces, obj_traceback)
self.assertIsInstance(trace, tuple)
domain, size, traceback = trace
self.assertEqual(size, obj_size)
self.assertEqual(traceback, obj_traceback._frames)
tracemalloc.stop()
self.assertEqual(tracemalloc._get_traces(), [])
def test_get_traces_intern_traceback(self):
# dummy wrappers to get more useful and identical frames in the traceback
def allocate_bytes2(size):
return allocate_bytes(size)
def allocate_bytes3(size):
return allocate_bytes2(size)
def allocate_bytes4(size):
return allocate_bytes3(size)
# Ensure that two identical tracebacks are not duplicated
tracemalloc.stop()
tracemalloc.start(4)
obj_size = 123
obj1, obj1_traceback = allocate_bytes4(obj_size)
obj2, obj2_traceback = allocate_bytes4(obj_size)
traces = tracemalloc._get_traces()
trace1 = self.find_trace(traces, obj1_traceback)
trace2 = self.find_trace(traces, obj2_traceback)
domain1, size1, traceback1 = trace1
domain2, size2, traceback2 = trace2
self.assertIs(traceback2, traceback1)
def test_get_traced_memory(self):
# Python allocates some internals objects, so the test must tolerate
# a small difference between the expected size and the real usage
max_error = 2048
# allocate one object
obj_size = 1024 * 1024
tracemalloc.clear_traces()
obj, obj_traceback = allocate_bytes(obj_size)
size, peak_size = tracemalloc.get_traced_memory()
self.assertGreaterEqual(size, obj_size)
self.assertGreaterEqual(peak_size, size)
self.assertLessEqual(size - obj_size, max_error)
self.assertLessEqual(peak_size - size, max_error)
# destroy the object
obj = None
size2, peak_size2 = tracemalloc.get_traced_memory()
self.assertLess(size2, size)
self.assertGreaterEqual(size - size2, obj_size - max_error)
self.assertGreaterEqual(peak_size2, peak_size)
# clear_traces() must reset traced memory counters
tracemalloc.clear_traces()
self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))
# allocate another object
obj, obj_traceback = allocate_bytes(obj_size)
size, peak_size = tracemalloc.get_traced_memory()
self.assertGreaterEqual(size, obj_size)
# stop() also resets traced memory counters
tracemalloc.stop()
self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))
def test_clear_traces(self):
obj, obj_traceback = allocate_bytes(123)
traceback = tracemalloc.get_object_traceback(obj)
self.assertIsNotNone(traceback)
tracemalloc.clear_traces()
traceback2 = tracemalloc.get_object_traceback(obj)
self.assertIsNone(traceback2)
def test_is_tracing(self):
tracemalloc.stop()
self.assertFalse(tracemalloc.is_tracing())
tracemalloc.start()
self.assertTrue(tracemalloc.is_tracing())
def test_snapshot(self):
obj, source = allocate_bytes(123)
# take a snapshot
snapshot = tracemalloc.take_snapshot()
# write on disk
snapshot.dump(support.TESTFN)
self.addCleanup(support.unlink, support.TESTFN)
# load from disk
snapshot2 = tracemalloc.Snapshot.load(support.TESTFN)
self.assertEqual(snapshot2.traces, snapshot.traces)
# tracemalloc must be tracing memory allocations to take a snapshot
tracemalloc.stop()
with self.assertRaises(RuntimeError) as cm:
tracemalloc.take_snapshot()
self.assertEqual(str(cm.exception),
"the tracemalloc module must be tracing memory "
"allocations to take a snapshot")
def test_snapshot_save_attr(self):
# take a snapshot with a new attribute
snapshot = tracemalloc.take_snapshot()
snapshot.test_attr = "new"
snapshot.dump(support.TESTFN)
self.addCleanup(support.unlink, support.TESTFN)
# load() should recreate the attribute
snapshot2 = tracemalloc.Snapshot.load(support.TESTFN)
self.assertEqual(snapshot2.test_attr, "new")
def fork_child(self):
if not tracemalloc.is_tracing():
return 2
obj_size = 12345
obj, obj_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj)
if traceback is None:
return 3
# everything is fine
return 0
@unittest.skipUnless(hasattr(os, 'fork'), 'need os.fork()')
def test_fork(self):
# check that tracemalloc is still working after fork
pid = os.fork()
if not pid:
# child
exitcode = 1
try:
exitcode = self.fork_child()
finally:
os._exit(exitcode)
else:
pid2, status = os.waitpid(pid, 0)
self.assertTrue(os.WIFEXITED(status))
exitcode = os.WEXITSTATUS(status)
self.assertEqual(exitcode, 0)
class TestSnapshot(unittest.TestCase):
maxDiff = 4000
def test_create_snapshot(self):
raw_traces = [(0, 5, (('a.py', 2),))]
with contextlib.ExitStack() as stack:
stack.enter_context(patch.object(tracemalloc, 'is_tracing',
return_value=True))
stack.enter_context(patch.object(tracemalloc, 'get_traceback_limit',
return_value=5))
stack.enter_context(patch.object(tracemalloc, '_get_traces',
return_value=raw_traces))
snapshot = tracemalloc.take_snapshot()
self.assertEqual(snapshot.traceback_limit, 5)
self.assertEqual(len(snapshot.traces), 1)
trace = snapshot.traces[0]
self.assertEqual(trace.size, 5)
self.assertEqual(len(trace.traceback), 1)
self.assertEqual(trace.traceback[0].filename, 'a.py')
self.assertEqual(trace.traceback[0].lineno, 2)
def test_filter_traces(self):
snapshot, snapshot2 = create_snapshots()
filter1 = tracemalloc.Filter(False, "b.py")
filter2 = tracemalloc.Filter(True, "a.py", 2)
filter3 = tracemalloc.Filter(True, "a.py", 5)
original_traces = list(snapshot.traces._traces)
# exclude b.py
snapshot3 = snapshot.filter_traces((filter1,))
self.assertEqual(snapshot3.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(1, 2, (('a.py', 5), ('b.py', 4))),
(3, 7, (('<unknown>', 0),)),
])
# filter_traces() must not touch the original snapshot
self.assertEqual(snapshot.traces._traces, original_traces)
# only include two lines of a.py
snapshot4 = snapshot3.filter_traces((filter2, filter3))
self.assertEqual(snapshot4.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(1, 2, (('a.py', 5), ('b.py', 4))),
])
# No filter: just duplicate the snapshot
snapshot5 = snapshot.filter_traces(())
self.assertIsNot(snapshot5, snapshot)
self.assertIsNot(snapshot5.traces, snapshot.traces)
self.assertEqual(snapshot5.traces, snapshot.traces)
self.assertRaises(TypeError, snapshot.filter_traces, filter1)
def test_filter_traces_domain(self):
snapshot, snapshot2 = create_snapshots()
filter1 = tracemalloc.Filter(False, "a.py", domain=1)
filter2 = tracemalloc.Filter(True, "a.py", domain=1)
original_traces = list(snapshot.traces._traces)
# exclude a.py of domain 1
snapshot3 = snapshot.filter_traces((filter1,))
self.assertEqual(snapshot3.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(2, 66, (('b.py', 1),)),
(3, 7, (('<unknown>', 0),)),
])
# include domain 1
snapshot3 = snapshot.filter_traces((filter1,))
self.assertEqual(snapshot3.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(2, 66, (('b.py', 1),)),
(3, 7, (('<unknown>', 0),)),
])
def test_filter_traces_domain_filter(self):
snapshot, snapshot2 = create_snapshots()
filter1 = tracemalloc.DomainFilter(False, domain=3)
filter2 = tracemalloc.DomainFilter(True, domain=3)
# exclude domain 2
snapshot3 = snapshot.filter_traces((filter1,))
self.assertEqual(snapshot3.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(1, 2, (('a.py', 5), ('b.py', 4))),
(2, 66, (('b.py', 1),)),
])
# include domain 2
snapshot3 = snapshot.filter_traces((filter2,))
self.assertEqual(snapshot3.traces._traces, [
(3, 7, (('<unknown>', 0),)),
])
def test_snapshot_group_by_line(self):
snapshot, snapshot2 = create_snapshots()
tb_0 = traceback_lineno('<unknown>', 0)
tb_a_2 = traceback_lineno('a.py', 2)
tb_a_5 = traceback_lineno('a.py', 5)
tb_b_1 = traceback_lineno('b.py', 1)
tb_c_578 = traceback_lineno('c.py', 578)
# stats per file and line
stats1 = snapshot.statistics('lineno')
self.assertEqual(stats1, [
tracemalloc.Statistic(tb_b_1, 66, 1),
tracemalloc.Statistic(tb_a_2, 30, 3),
tracemalloc.Statistic(tb_0, 7, 1),
tracemalloc.Statistic(tb_a_5, 2, 1),
])
# stats per file and line (2)
stats2 = snapshot2.statistics('lineno')
self.assertEqual(stats2, [
tracemalloc.Statistic(tb_a_5, 5002, 2),
tracemalloc.Statistic(tb_c_578, 400, 1),
tracemalloc.Statistic(tb_a_2, 30, 3),
])
# stats diff per file and line
statistics = snapshot2.compare_to(snapshot, 'lineno')
self.assertEqual(statistics, [
tracemalloc.StatisticDiff(tb_a_5, 5002, 5000, 2, 1),
tracemalloc.StatisticDiff(tb_c_578, 400, 400, 1, 1),
tracemalloc.StatisticDiff(tb_b_1, 0, -66, 0, -1),
tracemalloc.StatisticDiff(tb_0, 0, -7, 0, -1),
tracemalloc.StatisticDiff(tb_a_2, 30, 0, 3, 0),
])
def test_snapshot_group_by_file(self):
snapshot, snapshot2 = create_snapshots()
tb_0 = traceback_filename('<unknown>')
tb_a = traceback_filename('a.py')
tb_b = traceback_filename('b.py')
tb_c = traceback_filename('c.py')
# stats per file
stats1 = snapshot.statistics('filename')
self.assertEqual(stats1, [
tracemalloc.Statistic(tb_b, 66, 1),
tracemalloc.Statistic(tb_a, 32, 4),
tracemalloc.Statistic(tb_0, 7, 1),
])
# stats per file (2)
stats2 = snapshot2.statistics('filename')
self.assertEqual(stats2, [
tracemalloc.Statistic(tb_a, 5032, 5),
tracemalloc.Statistic(tb_c, 400, 1),
])
# stats diff per file
diff = snapshot2.compare_to(snapshot, 'filename')
self.assertEqual(diff, [
tracemalloc.StatisticDiff(tb_a, 5032, 5000, 5, 1),
tracemalloc.StatisticDiff(tb_c, 400, 400, 1, 1),
tracemalloc.StatisticDiff(tb_b, 0, -66, 0, -1),
tracemalloc.StatisticDiff(tb_0, 0, -7, 0, -1),
])
def test_snapshot_group_by_traceback(self):
snapshot, snapshot2 = create_snapshots()
# stats per file
tb1 = traceback(('a.py', 2), ('b.py', 4))
tb2 = traceback(('a.py', 5), ('b.py', 4))
tb3 = traceback(('b.py', 1))
tb4 = traceback(('<unknown>', 0))
stats1 = snapshot.statistics('traceback')
self.assertEqual(stats1, [
tracemalloc.Statistic(tb3, 66, 1),
tracemalloc.Statistic(tb1, 30, 3),
tracemalloc.Statistic(tb4, 7, 1),
tracemalloc.Statistic(tb2, 2, 1),
])
# stats per file (2)
tb5 = traceback(('c.py', 578))
stats2 = snapshot2.statistics('traceback')
self.assertEqual(stats2, [
tracemalloc.Statistic(tb2, 5002, 2),
tracemalloc.Statistic(tb5, 400, 1),
tracemalloc.Statistic(tb1, 30, 3),
])
# stats diff per file
diff = snapshot2.compare_to(snapshot, 'traceback')
self.assertEqual(diff, [
tracemalloc.StatisticDiff(tb2, 5002, 5000, 2, 1),
tracemalloc.StatisticDiff(tb5, 400, 400, 1, 1),
tracemalloc.StatisticDiff(tb3, 0, -66, 0, -1),
tracemalloc.StatisticDiff(tb4, 0, -7, 0, -1),
tracemalloc.StatisticDiff(tb1, 30, 0, 3, 0),
])
self.assertRaises(ValueError,
snapshot.statistics, 'traceback', cumulative=True)
def test_snapshot_group_by_cumulative(self):
snapshot, snapshot2 = create_snapshots()
tb_0 = traceback_filename('<unknown>')
tb_a = traceback_filename('a.py')
tb_b = traceback_filename('b.py')
tb_a_2 = traceback_lineno('a.py', 2)
tb_a_5 = traceback_lineno('a.py', 5)
tb_b_1 = traceback_lineno('b.py', 1)
tb_b_4 = traceback_lineno('b.py', 4)
# per file
stats = snapshot.statistics('filename', True)
self.assertEqual(stats, [
tracemalloc.Statistic(tb_b, 98, 5),
tracemalloc.Statistic(tb_a, 32, 4),
tracemalloc.Statistic(tb_0, 7, 1),
])
# per line
stats = snapshot.statistics('lineno', True)
self.assertEqual(stats, [
tracemalloc.Statistic(tb_b_1, 66, 1),
tracemalloc.Statistic(tb_b_4, 32, 4),
tracemalloc.Statistic(tb_a_2, 30, 3),
tracemalloc.Statistic(tb_0, 7, 1),
tracemalloc.Statistic(tb_a_5, 2, 1),
])
def test_trace_format(self):
snapshot, snapshot2 = create_snapshots()
trace = snapshot.traces[0]
self.assertEqual(str(trace), 'a.py:2: 10 B')
traceback = trace.traceback
self.assertEqual(str(traceback), 'a.py:2')
frame = traceback[0]
self.assertEqual(str(frame), 'a.py:2')
def test_statistic_format(self):
snapshot, snapshot2 = create_snapshots()
stats = snapshot.statistics('lineno')
stat = stats[0]
self.assertEqual(str(stat),
'b.py:1: size=66 B, count=1, average=66 B')
def test_statistic_diff_format(self):
snapshot, snapshot2 = create_snapshots()
stats = snapshot2.compare_to(snapshot, 'lineno')
stat = stats[0]
self.assertEqual(str(stat),
'a.py:5: size=5002 B (+5000 B), count=2 (+1), average=2501 B')
def test_slices(self):
snapshot, snapshot2 = create_snapshots()
self.assertEqual(snapshot.traces[:2],
(snapshot.traces[0], snapshot.traces[1]))
traceback = snapshot.traces[0].traceback
self.assertEqual(traceback[:2],
(traceback[0], traceback[1]))
def test_format_traceback(self):
snapshot, snapshot2 = create_snapshots()
def getline(filename, lineno):
return ' <%s, %s>' % (filename, lineno)
with unittest.mock.patch('tracemalloc.linecache.getline',
side_effect=getline):
tb = snapshot.traces[0].traceback
self.assertEqual(tb.format(),
[' File "a.py", line 2',
' <a.py, 2>',
' File "b.py", line 4',
' <b.py, 4>'])
self.assertEqual(tb.format(limit=1),
[' File "a.py", line 2',
' <a.py, 2>'])
self.assertEqual(tb.format(limit=-1),
[])
class TestFilters(unittest.TestCase):
maxDiff = 2048
def test_filter_attributes(self):
# test default values
f = tracemalloc.Filter(True, "abc")
self.assertEqual(f.inclusive, True)
self.assertEqual(f.filename_pattern, "abc")
self.assertIsNone(f.lineno)
self.assertEqual(f.all_frames, False)
# test custom values
f = tracemalloc.Filter(False, "test.py", 123, True)
self.assertEqual(f.inclusive, False)
self.assertEqual(f.filename_pattern, "test.py")
self.assertEqual(f.lineno, 123)
self.assertEqual(f.all_frames, True)
# parameters passed by keyword
f = tracemalloc.Filter(inclusive=False, filename_pattern="test.py", lineno=123, all_frames=True)
self.assertEqual(f.inclusive, False)
self.assertEqual(f.filename_pattern, "test.py")
self.assertEqual(f.lineno, 123)
self.assertEqual(f.all_frames, True)
# read-only attribute
self.assertRaises(AttributeError, setattr, f, "filename_pattern", "abc")
def test_filter_match(self):
# filter without line number
f = tracemalloc.Filter(True, "abc")
self.assertTrue(f._match_frame("abc", 0))
self.assertTrue(f._match_frame("abc", 5))
self.assertTrue(f._match_frame("abc", 10))
self.assertFalse(f._match_frame("12356", 0))
self.assertFalse(f._match_frame("12356", 5))
self.assertFalse(f._match_frame("12356", 10))
f = tracemalloc.Filter(False, "abc")
self.assertFalse(f._match_frame("abc", 0))
self.assertFalse(f._match_frame("abc", 5))
self.assertFalse(f._match_frame("abc", 10))
self.assertTrue(f._match_frame("12356", 0))
self.assertTrue(f._match_frame("12356", 5))
self.assertTrue(f._match_frame("12356", 10))
# filter with line number > 0
f = tracemalloc.Filter(True, "abc", 5)
self.assertFalse(f._match_frame("abc", 0))
self.assertTrue(f._match_frame("abc", 5))
self.assertFalse(f._match_frame("abc", 10))
self.assertFalse(f._match_frame("12356", 0))
self.assertFalse(f._match_frame("12356", 5))
self.assertFalse(f._match_frame("12356", 10))
f = tracemalloc.Filter(False, "abc", 5)
self.assertTrue(f._match_frame("abc", 0))
self.assertFalse(f._match_frame("abc", 5))
self.assertTrue(f._match_frame("abc", 10))
self.assertTrue(f._match_frame("12356", 0))
self.assertTrue(f._match_frame("12356", 5))
self.assertTrue(f._match_frame("12356", 10))
# filter with line number 0
f = tracemalloc.Filter(True, "abc", 0)
self.assertTrue(f._match_frame("abc", 0))
self.assertFalse(f._match_frame("abc", 5))
self.assertFalse(f._match_frame("abc", 10))
self.assertFalse(f._match_frame("12356", 0))
self.assertFalse(f._match_frame("12356", 5))
self.assertFalse(f._match_frame("12356", 10))
f = tracemalloc.Filter(False, "abc", 0)
self.assertFalse(f._match_frame("abc", 0))
self.assertTrue(f._match_frame("abc", 5))
self.assertTrue(f._match_frame("abc", 10))
self.assertTrue(f._match_frame("12356", 0))
self.assertTrue(f._match_frame("12356", 5))
self.assertTrue(f._match_frame("12356", 10))
def test_filter_match_filename(self):
def fnmatch(inclusive, filename, pattern):
f = tracemalloc.Filter(inclusive, pattern)
return f._match_frame(filename, 0)
self.assertTrue(fnmatch(True, "abc", "abc"))
self.assertFalse(fnmatch(True, "12356", "abc"))
self.assertFalse(fnmatch(True, "<unknown>", "abc"))
self.assertFalse(fnmatch(False, "abc", "abc"))
self.assertTrue(fnmatch(False, "12356", "abc"))
self.assertTrue(fnmatch(False, "<unknown>", "abc"))
def test_filter_match_filename_joker(self):
def fnmatch(filename, pattern):
filter = tracemalloc.Filter(True, pattern)
return filter._match_frame(filename, 0)
# empty string
self.assertFalse(fnmatch('abc', ''))
self.assertFalse(fnmatch('', 'abc'))
self.assertTrue(fnmatch('', ''))
self.assertTrue(fnmatch('', '*'))
# no *
self.assertTrue(fnmatch('abc', 'abc'))
self.assertFalse(fnmatch('abc', 'abcd'))
self.assertFalse(fnmatch('abc', 'def'))
# a*
self.assertTrue(fnmatch('abc', 'a*'))
self.assertTrue(fnmatch('abc', 'abc*'))
self.assertFalse(fnmatch('abc', 'b*'))
self.assertFalse(fnmatch('abc', 'abcd*'))
# a*b
self.assertTrue(fnmatch('abc', 'a*c'))
self.assertTrue(fnmatch('abcdcx', 'a*cx'))
self.assertFalse(fnmatch('abb', 'a*c'))
self.assertFalse(fnmatch('abcdce', 'a*cx'))
# a*b*c
self.assertTrue(fnmatch('abcde', 'a*c*e'))
self.assertTrue(fnmatch('abcbdefeg', 'a*bd*eg'))
self.assertFalse(fnmatch('abcdd', 'a*c*e'))
self.assertFalse(fnmatch('abcbdefef', 'a*bd*eg'))
# replace .pyc suffix with .py
self.assertTrue(fnmatch('a.pyc', 'a.py'))
self.assertTrue(fnmatch('a.py', 'a.pyc'))
if os.name == 'nt':
# case insensitive
self.assertTrue(fnmatch('aBC', 'ABc'))
self.assertTrue(fnmatch('aBcDe', 'Ab*dE'))
self.assertTrue(fnmatch('a.pyc', 'a.PY'))
self.assertTrue(fnmatch('a.py', 'a.PYC'))
else:
# case sensitive
self.assertFalse(fnmatch('aBC', 'ABc'))
self.assertFalse(fnmatch('aBcDe', 'Ab*dE'))
self.assertFalse(fnmatch('a.pyc', 'a.PY'))
self.assertFalse(fnmatch('a.py', 'a.PYC'))
if os.name == 'nt':
# normalize alternate separator "/" to the standard separator "\"
self.assertTrue(fnmatch(r'a/b', r'a\b'))
self.assertTrue(fnmatch(r'a\b', r'a/b'))
self.assertTrue(fnmatch(r'a/b\c', r'a\b/c'))
self.assertTrue(fnmatch(r'a/b/c', r'a\b\c'))
else:
# there is no alternate separator
self.assertFalse(fnmatch(r'a/b', r'a\b'))
self.assertFalse(fnmatch(r'a\b', r'a/b'))
self.assertFalse(fnmatch(r'a/b\c', r'a\b/c'))
self.assertFalse(fnmatch(r'a/b/c', r'a\b\c'))
# as of 3.5, .pyo is no longer munged to .py
self.assertFalse(fnmatch('a.pyo', 'a.py'))
def test_filter_match_trace(self):
t1 = (("a.py", 2), ("b.py", 3))
t2 = (("b.py", 4), ("b.py", 5))
t3 = (("c.py", 5), ('<unknown>', 0))
unknown = (('<unknown>', 0),)
f = tracemalloc.Filter(True, "b.py", all_frames=True)
self.assertTrue(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertFalse(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
f = tracemalloc.Filter(True, "b.py", all_frames=False)
self.assertFalse(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertFalse(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "b.py", all_frames=True)
self.assertFalse(f._match_traceback(t1))
self.assertFalse(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertTrue(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "b.py", all_frames=False)
self.assertTrue(f._match_traceback(t1))
self.assertFalse(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertTrue(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "<unknown>", all_frames=False)
self.assertTrue(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
f = tracemalloc.Filter(True, "<unknown>", all_frames=True)
self.assertFalse(f._match_traceback(t1))
self.assertFalse(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertTrue(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "<unknown>", all_frames=True)
self.assertTrue(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertFalse(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
class TestCommandLine(unittest.TestCase):
def test_env_var_disabled_by_default(self):
# not tracing by default
code = 'import tracemalloc; print(tracemalloc.is_tracing())'
ok, stdout, stderr = assert_python_ok('-c', code)
stdout = stdout.rstrip()
self.assertEqual(stdout, b'False')
@unittest.skipIf(interpreter_requires_environment(),
'Cannot run -E tests when PYTHON env vars are required.')
def test_env_var_ignored_with_E(self):
"""PYTHON* environment variables must be ignored when -E is present."""
code = 'import tracemalloc; print(tracemalloc.is_tracing())'
ok, stdout, stderr = assert_python_ok('-E', '-c', code, PYTHONTRACEMALLOC='1')
stdout = stdout.rstrip()
self.assertEqual(stdout, b'False')
def test_env_var_enabled_at_startup(self):
# tracing at startup
code = 'import tracemalloc; print(tracemalloc.is_tracing())'
ok, stdout, stderr = assert_python_ok('-c', code, PYTHONTRACEMALLOC='1')
stdout = stdout.rstrip()
self.assertEqual(stdout, b'True')
def test_env_limit(self):
# start and set the number of frames
code = 'import tracemalloc; print(tracemalloc.get_traceback_limit())'
ok, stdout, stderr = assert_python_ok('-c', code, PYTHONTRACEMALLOC='10')
stdout = stdout.rstrip()
self.assertEqual(stdout, b'10')
def test_env_var_invalid(self):
for nframe in (-1, 0, 2**30):
with self.subTest(nframe=nframe):
with support.SuppressCrashReport():
ok, stdout, stderr = assert_python_failure(
'-c', 'pass',
PYTHONTRACEMALLOC=str(nframe))
self.assertIn(b'PYTHONTRACEMALLOC: invalid '
b'number of frames',
stderr)
def test_sys_xoptions(self):
for xoptions, nframe in (
('tracemalloc', 1),
('tracemalloc=1', 1),
('tracemalloc=15', 15),
):
with self.subTest(xoptions=xoptions, nframe=nframe):
code = 'import tracemalloc; print(tracemalloc.get_traceback_limit())'
ok, stdout, stderr = assert_python_ok('-X', xoptions, '-c', code)
stdout = stdout.rstrip()
self.assertEqual(stdout, str(nframe).encode('ascii'))
def test_sys_xoptions_invalid(self):
for nframe in (-1, 0, 2**30):
with self.subTest(nframe=nframe):
with support.SuppressCrashReport():
args = ('-X', 'tracemalloc=%s' % nframe, '-c', 'pass')
ok, stdout, stderr = assert_python_failure(*args)
self.assertIn(b'-X tracemalloc=NFRAME: invalid '
b'number of frames',
stderr)
def test_pymem_alloc0(self):
# Issue #21639: Check that PyMem_Malloc(0) with tracemalloc enabled
# does not crash.
code = 'import _testcapi; _testcapi.test_pymem_alloc0(); 1'
assert_python_ok('-X', 'tracemalloc', '-c', code)
@unittest.skipIf(_testcapi is None, 'need _testcapi')
class TestCAPI(unittest.TestCase):
maxDiff = 80 * 20
def setUp(self):
if tracemalloc.is_tracing():
self.skipTest("tracemalloc must be stopped before the test")
self.domain = 5
self.size = 123
self.obj = allocate_bytes(self.size)[0]
# for the type "object", id(obj) is the address of its memory block.
# This type is not tracked by the garbage collector
self.ptr = id(self.obj)
def tearDown(self):
tracemalloc.stop()
def get_traceback(self):
frames = _testcapi.tracemalloc_get_traceback(self.domain, self.ptr)
if frames is not None:
return tracemalloc.Traceback(frames)
else:
return None
def track(self, release_gil=False, nframe=1):
frames = get_frames(nframe, 2)
_testcapi.tracemalloc_track(self.domain, self.ptr, self.size,
release_gil)
return frames
def untrack(self):
_testcapi.tracemalloc_untrack(self.domain, self.ptr)
def get_traced_memory(self):
# Get the traced size in the domain
snapshot = tracemalloc.take_snapshot()
domain_filter = tracemalloc.DomainFilter(True, self.domain)
snapshot = snapshot.filter_traces([domain_filter])
return sum(trace.size for trace in snapshot.traces)
def check_track(self, release_gil):
nframe = 5
tracemalloc.start(nframe)
size = tracemalloc.get_traced_memory()[0]
frames = self.track(release_gil, nframe)
self.assertEqual(self.get_traceback(),
tracemalloc.Traceback(frames))
self.assertEqual(self.get_traced_memory(), self.size)
def test_track(self):
self.check_track(False)
def test_track_without_gil(self):
# check that calling _PyTraceMalloc_Track() without holding the GIL
# works too
self.check_track(True)
def test_track_already_tracked(self):
nframe = 5
tracemalloc.start(nframe)
# track a first time
self.track()
# calling _PyTraceMalloc_Track() must remove the old trace and add
# a new trace with the new traceback
frames = self.track(nframe=nframe)
self.assertEqual(self.get_traceback(),
tracemalloc.Traceback(frames))
def test_untrack(self):
tracemalloc.start()
self.track()
self.assertIsNotNone(self.get_traceback())
self.assertEqual(self.get_traced_memory(), self.size)
# untrack must remove the trace
self.untrack()
self.assertIsNone(self.get_traceback())
self.assertEqual(self.get_traced_memory(), 0)
# calling _PyTraceMalloc_Untrack() multiple times must not crash
self.untrack()
self.untrack()
def test_stop_track(self):
tracemalloc.start()
tracemalloc.stop()
with self.assertRaises(RuntimeError):
self.track()
self.assertIsNone(self.get_traceback())
def test_stop_untrack(self):
tracemalloc.start()
self.track()
tracemalloc.stop()
with self.assertRaises(RuntimeError):
self.untrack()
def test_main():
support.run_unittest(
TestTracemallocEnabled,
TestSnapshot,
TestFilters,
TestCommandLine,
TestCAPI,
)
if __name__ == "__main__":
test_main()
| 36.408451 | 104 | 0.593645 | [
"MIT"
] | BrainSpawnInfosphere/raspbian_pkgs | python3/Python-3.6.1/Lib/test/test_tracemalloc.py | 36,190 | Python |
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import numpy as np
import torch
from nnunet.network_architecture.generic_modular_residual_UNet import FabiansUNet, get_default_network_config
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
class nnUNetTrainerV2_ResencUNet(nnUNetTrainerV2):
def initialize_network(self):
if self.threeD:
cfg = get_default_network_config(3, None, norm_type="in")
else:
cfg = get_default_network_config(1, None, norm_type="in")
stage_plans = self.plans['plans_per_stage'][self.stage]
conv_kernel_sizes = stage_plans['conv_kernel_sizes']
blocks_per_stage_encoder = stage_plans['num_blocks_encoder']
blocks_per_stage_decoder = stage_plans['num_blocks_decoder']
pool_op_kernel_sizes = stage_plans['pool_op_kernel_sizes']
self.network = FabiansUNet(self.num_input_channels, self.base_num_features, blocks_per_stage_encoder, 2,
pool_op_kernel_sizes, conv_kernel_sizes, cfg, self.num_classes,
blocks_per_stage_decoder, True, False, 320, InitWeights_He(1e-2))
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
def setup_DA_params(self):
"""
net_num_pool_op_kernel_sizes is different in resunet
"""
super().setup_DA_params()
self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(
np.vstack(self.net_num_pool_op_kernel_sizes[1:]), axis=0))[:-1]
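        # Illustrative example (kernel sizes assumed for demonstration, not taken
        # from the original source): with net_num_pool_op_kernel_sizes[1:] equal to
        # [[2, 2, 2], [2, 2, 2], [1, 2, 2]], np.cumprod gives
        # [[2, 2, 2], [4, 4, 4], [4, 8, 8]]; taking the reciprocal and dropping the
        # last (coarsest) entry yields deep_supervision_scales of
        # [[1, 1, 1], [0.5, 0.5, 0.5], [0.25, 0.25, 0.25]].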
def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
force_separate_z: bool = None, interpolation_order: int = 3, interpolation_order_z=0,
segmentation_export_kwargs: dict = None, run_postprocessing_on_folds: bool = True):
ds = self.network.decoder.deep_supervision
self.network.decoder.deep_supervision = False
ret = nnUNetTrainer.validate(self, do_mirroring=do_mirroring, use_sliding_window=use_sliding_window,
step_size=step_size, save_softmax=save_softmax, use_gaussian=use_gaussian,
overwrite=overwrite, validation_folder_name=validation_folder_name,
debug=debug, all_in_gpu=all_in_gpu,
segmentation_export_kwargs=segmentation_export_kwargs,
run_postprocessing_on_folds=run_postprocessing_on_folds)
self.network.decoder.deep_supervision = ds
return ret
def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
mirror_axes: Tuple[int] = None,
use_sliding_window: bool = True, step_size: float = 0.5,
use_gaussian: bool = True, pad_border_mode: str = 'constant',
pad_kwargs: dict = None, all_in_gpu: bool = False,
verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
ds = self.network.decoder.deep_supervision
self.network.decoder.deep_supervision = False
ret = nnUNetTrainer.predict_preprocessed_data_return_seg_and_softmax(self, data, do_mirroring=do_mirroring,
mirror_axes=mirror_axes,
use_sliding_window=use_sliding_window,
step_size=step_size,
use_gaussian=use_gaussian,
pad_border_mode=pad_border_mode,
pad_kwargs=pad_kwargs,
all_in_gpu=all_in_gpu,
verbose=verbose,
mixed_precision=mixed_precision)
self.network.decoder.deep_supervision = ds
return ret
def run_training(self):
        self.maybe_update_lr(self.epoch)  # if we don't overwrite epoch then self.epoch+1 is used, which is not what we
        # want at the start of the training
ds = self.network.decoder.deep_supervision
self.network.decoder.deep_supervision = True
ret = nnUNetTrainer.run_training(self)
self.network.decoder.deep_supervision = ds
return ret
nnUNetTrainerV2_ResencUNet_copy1 = nnUNetTrainerV2_ResencUNet
nnUNetTrainerV2_ResencUNet_copy2 = nnUNetTrainerV2_ResencUNet
nnUNetTrainerV2_ResencUNet_copy3 = nnUNetTrainerV2_ResencUNet
nnUNetTrainerV2_ResencUNet_copy4 = nnUNetTrainerV2_ResencUNet
| 59.735849 | 134 | 0.609128 | ["Apache-2.0"] | ADVasculatureProject/nnUNet | nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ResencUNet.py | 6,332 | Python |
# Add comments to explain what the output from this program will be and how you know.
def math1():
num1 = 50
num2 = 5
return num1 + num2
def math2():
num1 = 50
num2 = 5
return num1 - num2
def math3():
num1 = 50
num2 = 5
return num1 * num2
output_num = math2()
print(output_num)
'''
Add prediction(s) here:
# I think it will work because I am smart. I predict the output will be 45, since math2() returns 50 - 5.
'''
| 15.461538 | 85 | 0.639303 | ["Apache-2.0"] | Athenian-ComputerScience-Fall2020/functions-practice-21lsparks | return_practice.py | 402 | Python |
#!/usr/bin/python
#
# A library for finding the optimal dirichlet prior from counts
# By: Max Sklar
# @maxsklar
# https://github.com/maxsklar
# Copyright 2013 Max Sklar
import math
import logging
import random
import scipy.special as mathExtra
import scipy
import numpy as np
def digamma(x): return mathExtra.psi(x)
def trigamma(x): return mathExtra.polygamma(1, x)
# Find the "sufficient statistic" for a group of multinomials.
# Essentially, it's the average of the log probabilities.
def getSufficientStatistic(multinomials):
N = len(multinomials)
K = len(multinomials[0])
retVal = [0]*K
for m in multinomials:
for k in range(0, K):
retVal[k] += math.log(m[k])
for k in range(0, K): retVal[k] /= N
return retVal
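# Illustrative note (an addition): for a dense (N, K) array of strictly positive
# probabilities, the loop above is equivalent, up to floating-point rounding, to
# the vectorised form
#   ss = np.log(np.asarray(multinomials)).mean(axis=0)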
# Find the log probability of the data for a given dirichlet
# This is equal to the log probability of the data, up to a linear transform.
def logProbForMultinomials(alphas, ss, delta):
alpha_sum = np.sum(alphas)
retVal = mathExtra.gammaln(alpha_sum)
retVal -= np.sum(mathExtra.gammaln(alphas))
retVal += np.sum(np.multiply(alphas, ss))
retVal -= delta * np.square(alphas).sum()
return retVal
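# Illustrative note (an addition): ignoring the delta penalty, N times the value
# above equals the full Dirichlet log-likelihood of the N observed multinomials
# up to a constant that does not depend on the alphas; the penalty
# delta * sum_k alpha_k**2 is an added regulariser that keeps the alphas from
# diverging.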
# Gives the gradient of the log probability with respect to the prior parameters
# (the alphas). This will be used to adjust the loss.
def getGradientForMultinomials(alphas, ss, delta):
K = len(alphas)
C = digamma(sum(alphas)) # - DELTA * sum(alphas)
retVal = [C]*K
for k in range(0, K):
retVal[k] += ss[k] - digamma(alphas[k]) - 2 * delta * alphas[k]
return retVal
#The hessian is actually the sum of two matrices: a diagonal matrix and a constant-value matrix.
#We'll write two functions to get both
def priorHessianConst(alphas, ss, delta): return -trigamma(sum(alphas)) + 2 * delta
def priorHessianDiag(alphas, ss): return [trigamma(a) for a in alphas]
# Compute the next value to try here
# http://research.microsoft.com/en-us/um/people/minka/papers/dirichlet/minka-dirichlet.pdf (eq 18)
def getPredictedStep(hConst, hDiag, gradient):
K = len(gradient)
numSum = 0.0
for i in range(0, K):
numSum += gradient[i] / hDiag[i]
denSum = 0.0
for i in range(0, K): denSum += 1.0 / hDiag[i]
b = numSum / ((1.0/hConst) + denSum)
retVal = [0]*K
for i in range(0, K): retVal[i] = (b - gradient[i]) / hDiag[i]
return retVal
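# Illustrative note (an addition): this O(K) update works because the Hessian is
# a diagonal matrix plus a constant-valued (rank-one) matrix, so its inverse can
# be applied with a Sherman-Morrison-style identity rather than a full O(K^3)
# matrix inversion; see eq. 18 of the Minka paper linked above.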
# Uses the diagonal hessian on the log-alpha values
def getPredictedStepAlt(hConst, hDiag, gradient, alphas):
K = len(gradient)
Z = 0
for k in range(0, K):
Z += alphas[k] / (gradient[k] - alphas[k]*hDiag[k])
Z *= hConst
Ss = [0]*K
for k in range(0, K):
Ss[k] = 1.0 / (gradient[k] - alphas[k]*hDiag[k]) / (1 + Z)
S = sum(Ss)
retVal = [0]*K
for i in range(0, K):
retVal[i] = gradient[i] / (gradient[i] - alphas[i]*hDiag[i]) * (1 - hConst * alphas[i] * S)
return retVal
# The total loss is the negative of the penalized log probability defined above.
def getTotalLoss(trialPriors, ss, delta):
return -1*logProbForMultinomials(trialPriors, ss, delta)
def predictStepUsingHessian(gradient, priors, ss, delta):
totalHConst = priorHessianConst(priors, ss, delta)
totalHDiag = priorHessianDiag(priors, ss)
return getPredictedStep(totalHConst, totalHDiag, gradient)
def predictStepLogSpace(gradient, priors, ss, delta):
totalHConst = priorHessianConst(priors, ss, delta)
totalHDiag = priorHessianDiag(priors, ss)
return getPredictedStepAlt(totalHConst, totalHDiag, gradient, priors)
# Returns the loss for the trial priors (infinite when any alpha is non-positive, i.e. a bad step)
def testTrialPriors(trialPriors, ss, delta):
for alpha in trialPriors:
if alpha <= 0:
return float("inf")
return getTotalLoss(trialPriors, ss, delta)
def sqVectorSize(v):
s = 0
for i in range(0, len(v)): s += v[i] ** 2
return s
def findDirichletPriors(ss, initAlphas, max_iter=1000, delta=1e-2):
priors = initAlphas
# Let the learning begin!!
#Only step in a positive direction, get the current best loss.
currentLoss = getTotalLoss(priors, ss, delta)
gradientToleranceSq = 2 ** -20
learnRateTolerance = 2 ** -10
count = 0
while(count < max_iter):
count += 1
#Get the data for taking steps
gradient = getGradientForMultinomials(priors, ss, delta)
gradientSize = sqVectorSize(gradient)
#print(count, "Loss: ", currentLoss, ", Priors: ", priors, ", Gradient Size: ", gradientSize, gradient)
if (gradientSize < gradientToleranceSq):
#print("Converged with small gradient")
return priors
trialStep = predictStepUsingHessian(gradient, priors, ss, delta)
#First, try the second order method
trialPriors = [0]*len(priors)
for i in range(0, len(priors)): trialPriors[i] = priors[i] + trialStep[i]
loss = testTrialPriors(trialPriors, ss, delta)
if loss < currentLoss:
currentLoss = loss
priors = trialPriors
continue
    trialStep = predictStepLogSpace(gradient, priors, ss, delta)
    trialPriors = [0]*len(priors)
    for i in range(0, len(priors)): trialPriors[i] = priors[i] * math.exp(trialStep[i])
    loss = testTrialPriors(trialPriors, ss, delta)
    # Accept the log-space step if it improves on the current best loss
    if loss < currentLoss:
      currentLoss = loss
      priors = trialPriors
      continue
#Step in the direction of the gradient until there is a loss improvement
loss = 10000000
learnRate = 1.0
while loss > currentLoss:
learnRate *= 0.9
trialPriors = [0]*len(priors)
for i in range(0, len(priors)): trialPriors[i] = priors[i] + gradient[i]*learnRate
loss = testTrialPriors(trialPriors, ss, delta)
if (learnRate < learnRateTolerance):
#print("Converged with small learn rate")
return priors
currentLoss = loss
priors = trialPriors
#print("Reached max iterations")
return priors
def findDirichletPriorsFromMultinomials(multinomials, initAlphas):
ss = getSufficientStatistic(multinomials)
return findDirichletPriors(ss, initAlphas)
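# Illustrative usage sketch (an addition, not part of the original module); the
# sample size and the "true" alphas below are arbitrary demo values.
if __name__ == "__main__":
  true_alphas = [2.0, 5.0, 1.0]
  # draw synthetic probability vectors from a known Dirichlet
  samples = np.random.dirichlet(true_alphas, size=2000)
  # recover the prior starting from a uniform initial guess
  estimated = findDirichletPriorsFromMultinomials(samples, [1.0, 1.0, 1.0])
  print("Estimated alphas:", estimated)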
| 30.164021 | 107 | 0.695141 | ["BSD-3-Clause"] | davesean/modular_semantic_segmentation | xview/models/dirichletEstimation.py | 5,701 | Python |
#!/usr/bin/env python3
import unittest
import networkit as nk
class TestReachability(unittest.TestCase):
def testReachableNodes(self):
for directed in [False, True]:
for exact in [False, True]:
g = nk.generators.ErdosRenyiGenerator(100, 0.01, directed).generate()
rn = nk.reachability.ReachableNodes(g, exact).run()
for u in g.iterNodes():
reached = []
nk.traversal.Traversal.BFSfrom(g, u, lambda v, _: reached.append(v))
if exact:
self.assertEqual(rn.numberOfReachableNodes(u), len(reached))
else:
self.assertLessEqual(rn.numberOfReachableNodesLB(u), len(reached))
self.assertGreaterEqual(rn.numberOfReachableNodesUB(u), len(reached))
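# Illustrative usage sketch (an addition, not part of the original test file);
# the graph size and edge probability below are arbitrary demo values.
def _reachability_example():
    g = nk.generators.ErdosRenyiGenerator(50, 0.05, True).generate()
    rn = nk.reachability.ReachableNodes(g, True).run()  # exact counts
    # matches len(reached) in the test above, i.e. the start node is counted too
    return rn.numberOfReachableNodes(0)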
if __name__ == "__main__":
unittest.main()
| 30.833333 | 75 | 0.709459 | ["MIT"] | CxVercility/networkit | networkit/test/test_reachability.py | 740 | Python |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from senlin.common import exception as exc
from senlin.profiles.os.nova import server
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
spec = {
'type': 'os.nova.server',
'version': '1.0',
'properties': {
'context': {},
'auto_disk_config': True,
'availability_zone': 'FAKE_AZ',
'block_device_mapping': [{
'device_name': 'FAKE_NAME',
'volume_size': 1000,
}],
'flavor': 'FLAV',
'image': 'FAKE_IMAGE',
'key_name': 'FAKE_KEYNAME',
"metadata": {"meta var": "meta val"},
'name': 'FAKE_SERVER_NAME',
'networks': [{
'floating_ip': 'FAKE_FLOATING_IP',
'floating_network': 'FAKE_FLOATING_NET',
'security_groups': ['FAKE_SECURITY_GROUP'],
'port': 'FAKE_PORT',
'fixed_ip': 'FAKE_IP',
'network': 'FAKE_NET',
}],
'scheduler_hints': {
'same_host': 'HOST_ID',
},
}
}
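# Note (an addition): this spec is shared by most of the validation test classes
# below; each class builds a server.ServerProfile('t', spec) and exercises one
# of the profile's private _validate_* helpers against its table of scenarios.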
class TestAvailabilityZoneValidation(base.SenlinTestCase):
scenarios = [
('validate:success', dict(
reason=None,
success=True,
validate_result=[['FAKE_AZ']],
result='FAKE_AZ',
exception=None,
message='')),
('validate:driver_failure', dict(
reason=None,
success=False,
validate_result=exc.InternalError(message='BANG.'),
result='FAKE_AZ',
exception=exc.InternalError,
message='BANG.')),
('validate:not_found', dict(
reason=None,
success=False,
validate_result=[[]],
result='FAKE_AZ',
exception=exc.InvalidSpec,
message=("The specified availability_zone 'FAKE_AZ' could "
"not be found"))),
('create:success', dict(
reason='create',
success=True,
validate_result=[['FAKE_AZ']],
result='FAKE_AZ',
exception=None,
message='')),
('create:driver_failure', dict(
reason='create',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FAKE_AZ',
exception=exc.EResourceCreation,
message='Failed in creating server: BANG.')),
('create:not_found', dict(
reason='create',
success=False,
validate_result=[[]],
result='FAKE_AZ',
exception=exc.EResourceCreation,
message=("Failed in creating server: The specified "
"availability_zone 'FAKE_AZ' could not be found.")))
]
def setUp(self):
super(TestAvailabilityZoneValidation, self).setUp()
self.cc = mock.Mock()
prof = server.ServerProfile('t', spec)
prof._computeclient = self.cc
self.profile = prof
def test_validation(self):
self.cc.validate_azs.side_effect = self.validate_result
node = mock.Mock(id='NODE_ID')
if self.success:
res = self.profile._validate_az(node, 'FAKE_AZ', self.reason)
self.assertEqual(self.result, res)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_az,
node, 'FAKE_AZ', self.reason)
self.assertEqual(self.message, six.text_type(ex))
self.cc.validate_azs.assert_called_once_with(['FAKE_AZ'])
class TestFlavorValidation(base.SenlinTestCase):
scenarios = [
('validate:success', dict(
reason=None,
success=True,
validate_result=[mock.Mock(id='FID', is_disabled=False)],
result='FID',
exception=None,
message='')),
('validate:driver_failure', dict(
reason=None,
success=False,
validate_result=exc.InternalError(message='BANG.'),
result='FID',
exception=exc.InternalError,
message='BANG.')),
('validate:not_found', dict(
reason=None,
success=False,
validate_result=exc.InternalError(code=404, message='BANG.'),
result='FID',
exception=exc.InvalidSpec,
message="The specified flavor 'FLAVOR' could not be found.")),
('validate:disabled', dict(
reason=None,
success=False,
validate_result=[mock.Mock(id='FID', is_disabled=True)],
result='FID',
exception=exc.InvalidSpec,
message="The specified flavor 'FLAVOR' is disabled")),
('create:success', dict(
reason='create',
success=True,
validate_result=[mock.Mock(id='FID', is_disabled=False)],
result='FID',
exception=None,
message='')),
('create:driver_failure', dict(
reason='create',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message='Failed in creating server: BANG.')),
('create:not_found', dict(
reason='create',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message="Failed in creating server: BANG.")),
('create:disabled', dict(
reason='create',
success=False,
validate_result=[mock.Mock(id='FID', is_disabled=True)],
result='FID',
exception=exc.EResourceCreation,
message=("Failed in creating server: The specified flavor "
"'FLAVOR' is disabled."))),
('update:success', dict(
reason='update',
success=True,
validate_result=[mock.Mock(id='FID', is_disabled=False)],
result='FID',
exception=None,
message='')),
('update:driver_failure', dict(
reason='update',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
('update:not_found', dict(
reason='update',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
('update:disabled', dict(
reason='update',
success=False,
validate_result=[mock.Mock(id='FID', is_disabled=True)],
result='FID',
exception=exc.EResourceUpdate,
message=("Failed in updating server 'NOVA_ID': The specified "
"flavor 'FLAVOR' is disabled.")))
]
def setUp(self):
super(TestFlavorValidation, self).setUp()
self.cc = mock.Mock()
self.profile = server.ServerProfile('t', spec)
self.profile._computeclient = self.cc
def test_validation(self):
self.cc.flavor_find.side_effect = self.validate_result
node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
flavor = 'FLAVOR'
if self.success:
res = self.profile._validate_flavor(node, flavor, self.reason)
self.assertIsNotNone(res)
self.assertEqual(self.result, res.id)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_flavor,
node, flavor, self.reason)
self.assertEqual(self.message, six.text_type(ex))
self.cc.flavor_find.assert_called_once_with(flavor, False)
class TestImageValidation(base.SenlinTestCase):
scenarios = [
('validate:success', dict(
reason=None,
success=True,
validate_result=[mock.Mock(id='IMAGE_ID')],
result='IMAGE_ID',
exception=None,
message='')),
('validate:driver_failure', dict(
reason=None,
success=False,
validate_result=exc.InternalError(message='BANG.'),
result='FID',
exception=exc.InternalError,
message='BANG.')),
('validate:not_found', dict(
reason=None,
success=False,
validate_result=exc.InternalError(code=404, message='BANG.'),
result='FID',
exception=exc.InvalidSpec,
message="The specified image 'IMAGE' could not be found.")),
('create:success', dict(
reason='create',
success=True,
validate_result=[mock.Mock(id='IMAGE_ID')],
result='IMAGE_ID',
exception=None,
message='')),
('create:driver_failure', dict(
reason='create',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message='Failed in creating server: BANG.')),
('create:not_found', dict(
reason='create',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message="Failed in creating server: BANG.")),
('update:success', dict(
reason='update',
success=True,
validate_result=[mock.Mock(id='IMAGE_ID')],
result='IMAGE_ID',
exception=None,
message='')),
('update:driver_failure', dict(
reason='update',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
('update:not_found', dict(
reason='update',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
]
def setUp(self):
super(TestImageValidation, self).setUp()
self.cc = mock.Mock()
self.gc = mock.Mock()
self.profile = server.ServerProfile('t', spec)
self.profile._computeclient = self.cc
self.profile._glanceclient = self.gc
def test_validation(self):
self.gc.image_find.side_effect = self.validate_result
node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
image = 'IMAGE'
if self.success:
res = self.profile._validate_image(node, image, self.reason)
self.assertIsNotNone(res)
self.assertEqual(self.result, res.id)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_image,
node, image, self.reason)
self.assertEqual(self.message, six.text_type(ex))
self.gc.image_find.assert_called_once_with(image, False)
class TestVolumeValidation(base.SenlinTestCase):
scenarios = [
('validate:success', dict(
reason=None,
success=True,
validate_result=[mock.Mock(id='VOLUME_ID', status='available')],
result='VOLUME_ID',
exception=None,
message='')),
('validate:failure', dict(
reason=None,
success=False,
validate_result=[mock.Mock(id='VOLUME_ID', status='in-use')],
result='VOLUME_ID',
exception=exc.InvalidSpec,
message="The volume VOLUME should be in 'available' "
"status but is in 'in-use' status.")),
('validate:driver_failure', dict(
reason=None,
success=False,
validate_result=exc.InternalError(message='BANG.'),
result='FID',
exception=exc.InternalError,
message='BANG.')),
('validate:not_found', dict(
reason=None,
success=False,
validate_result=exc.InternalError(code=404, message='BANG.'),
result='FID',
exception=exc.InvalidSpec,
message="The specified volume 'VOLUME' could not be found.")),
('create:success', dict(
reason='create',
success=True,
validate_result=[mock.Mock(id='VOLUME_ID', status='available')],
result='VOLUME_ID',
exception=None,
message='')),
('create:driver_failure', dict(
reason='create',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message='Failed in creating server: BANG.')),
('create:not_found', dict(
reason='create',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message="Failed in creating server: BANG.")),
]
def setUp(self):
super(TestVolumeValidation, self).setUp()
bdm_v2 = [
{
'volume_size': 1,
'uuid': '6ce0be68',
'source_type': 'volume',
'destination_type': 'volume',
'boot_index': 0,
},
]
volume_spec = {
'type': 'os.nova.server',
'version': '1.0',
'properties': {
'flavor': 'FLAV',
'name': 'FAKE_SERVER_NAME',
'security_groups': ['HIGH_SECURITY_GROUP'],
'block_device_mapping_v2': bdm_v2,
}
}
self.vc = mock.Mock()
self.profile = server.ServerProfile('t', volume_spec)
self.profile._block_storageclient = self.vc
def test_validation(self):
self.vc.volume_get.side_effect = self.validate_result
node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
volume = 'VOLUME'
if self.success:
res = self.profile._validate_volume(node, volume, self.reason)
self.assertIsNotNone(res)
self.assertEqual(self.result, res.id)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_volume,
node, volume, self.reason)
self.assertEqual(self.message, six.text_type(ex))
self.vc.volume_get.assert_called_once_with(volume)
class TestKeypairValidation(base.SenlinTestCase):
scenarios = [
('validate:success', dict(
reason=None,
success=True,
validate_result=[mock.Mock(id='KEY_ID')],
result='KEY_ID',
exception=None,
message='')),
('validate:driver_failure', dict(
reason=None,
success=False,
validate_result=exc.InternalError(message='BANG.'),
result='FID',
exception=exc.InternalError,
message='BANG.')),
('validate:not_found', dict(
reason=None,
success=False,
validate_result=exc.InternalError(code=404, message='BANG.'),
result='FID',
exception=exc.InvalidSpec,
message="The specified key_name 'KEY' could not be found.")),
('create:success', dict(
reason='create',
success=True,
validate_result=[mock.Mock(id='IMAGE_ID')],
result='IMAGE_ID',
exception=None,
message='')),
('create:driver_failure', dict(
reason='create',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message='Failed in creating server: BANG.')),
('create:not_found', dict(
reason='create',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message="Failed in creating server: BANG.")),
('update:success', dict(
reason='update',
success=True,
validate_result=[mock.Mock(id='KEY_ID')],
result='KEY_ID',
exception=None,
message='')),
('update:driver_failure', dict(
reason='update',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
('update:not_found', dict(
reason='update',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
]
def setUp(self):
super(TestKeypairValidation, self).setUp()
self.cc = mock.Mock()
self.profile = server.ServerProfile('t', spec)
self.profile._computeclient = self.cc
def test_validation(self):
self.cc.keypair_find.side_effect = self.validate_result
node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
key = 'KEY'
if self.success:
res = self.profile._validate_keypair(node, key, self.reason)
self.assertIsNotNone(res)
self.assertEqual(self.result, res.id)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_keypair,
node, key, self.reason)
self.assertEqual(self.message, six.text_type(ex))
self.cc.keypair_find.assert_called_once_with(key, False)
class TestNetworkValidation(base.SenlinTestCase):
scenarios = [
('validate:net-n:port-n:fixed_ip-n:sgroups-n', dict(
reason=None,
success=True,
inputs={'port': 'PORT'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={'port': 'PORT_ID'},
exception=None,
message='')),
('validate:net-y:port-n:fixed_ip-n:sgroups-y', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'security_groups': ['default']},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[mock.Mock(id='SG_ID')],
floating_result=[],
result={'network': 'NET_ID', 'security_groups': ['SG_ID']},
exception=None,
message='')),
('validate:net-y:port-n:fixed_ip-n:sgroups-n:floating_net-y', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'floating_network': 'NET'},
net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'floating_network': 'NET_ID'},
exception=None,
message='')),
('validate:net-y:port-n:fixed_ip-n:floating_net-y:floating_ip-y', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'floating_network': 'NET',
'floating_ip': 'FLOATINGIP'},
net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[mock.Mock(id='FLOATINGIP_ID', status='INACTIVE')],
result={'network': 'NET_ID', 'floating_network': 'NET_ID',
'floating_ip_id': 'FLOATINGIP_ID',
'floating_ip': 'FLOATINGIP'},
exception=None,
message='')),
('validate:net-y:port-n:fixed_ip-y:sgroups-n', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'fixed_ip': 'FIXED_IP'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'fixed_ip': 'FIXED_IP'},
exception=None,
message='')),
('validate:net-f:port-y:fixed_ip-n:sgroups-n', dict(
reason=None,
success=False,
inputs={'network': 'NET', 'port': 'PORT'},
net_result=[exc.InternalError(message='NET Failure')],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.InvalidSpec,
message='NET Failure')),
('validate:net-n:port-f:fixed_ip-n', dict(
reason=None,
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[exc.InternalError(message='PORT Failure')],
sg_result=[],
floating_result=[],
result={},
exception=exc.InvalidSpec,
message='PORT Failure')),
('validate:net-n:port-active:fixed_ip-n', dict(
reason=None,
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='ACTIVE')],
sg_result=[],
floating_result=[],
result={},
exception=exc.InvalidSpec,
message='The status of the port PORT must be DOWN')),
('validate:net-n:port-y:fixed_ip-n:floating_net-n:floating_ip-y', dict(
reason=None,
success=False,
inputs={'port': 'PORT', 'floating_ip': 'FLOATINGIP'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[mock.Mock(id='FLOATINGIP_ID', status='INACTIVE')],
result={},
exception=exc.InvalidSpec,
message='Must specify a network to create floating IP')),
('validate:net-n:port-y:fixed_ip-n:floating_ip-active', dict(
reason=None,
success=False,
inputs={'port': 'PORT', 'floating_network': 'NET',
'floating_ip': 'FLOATINGIP'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[mock.Mock(id='FLOATINGIP_ID', status='ACTIVE')],
result={},
exception=exc.InvalidSpec,
message='the floating IP FLOATINGIP has been used.')),
('validate:net-n:port-n:fixed_ip-n', dict(
reason=None,
success=False,
inputs={'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.InvalidSpec,
message="One of 'port' and 'network' must be provided")),
('validate:net-n:port-y:fixed_ip-y', dict(
reason=None,
success=False,
inputs={'port': 'PORT', 'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={},
exception=exc.InvalidSpec,
message=("The 'port' property and the 'fixed_ip' property cannot "
"be specified at the same time"))),
('create:net-y:port-y:fixed_ip-n', dict(
reason='create',
success=True,
inputs={'network': 'NET', 'port': 'PORT'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'port': 'PORT_ID'},
exception=None,
message='')),
('create:net-y:port-n:fixed_ip-y', dict(
reason='create',
success=True,
inputs={'network': 'NET', 'fixed_ip': 'FIXED_IP'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'fixed_ip': 'FIXED_IP'},
exception=None,
message='')),
('create:net-y:port-n:fixed_ip-n:sgroups-y', dict(
reason='create',
success=True,
inputs={'network': 'NET', 'security_groups': ['default']},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[mock.Mock(id='SG_ID')],
floating_result=[],
result={'network': 'NET_ID', 'security_groups': ['SG_ID']},
exception=None,
message='')),
('create:net-y:port-n:fixed_ip-n:sgroups-n:floating_net-y', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'floating_network': 'NET'},
net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'floating_network': 'NET_ID'},
exception=None,
message='')),
('create:net-f:port-y:fixed_ip-n', dict(
reason='create',
success=False,
inputs={'network': 'NET', 'port': 'PORT'},
net_result=[exc.InternalError(message='NET Failure')],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceCreation,
message='Failed in creating server: NET Failure.')),
('create:net-n:port-f:fixed_ip-n', dict(
reason='create',
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[exc.InternalError(message='PORT Failure')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceCreation,
message='Failed in creating server: PORT Failure.')),
('create:net-n:port-active:fixed_ip-n', dict(
reason='create',
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='ACTIVE')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceCreation,
message=('Failed in creating server: The status of the port PORT '
'must be DOWN.'))),
('create:net-n:port-n:fixed_ip-n', dict(
reason='create',
success=False,
inputs={'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceCreation,
message=("Failed in creating server: One of 'port' "
"and 'network' must be provided."))),
('create:net-n:port-y:fixed_ip-y', dict(
reason='create',
success=False,
inputs={'port': 'PORT', 'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceCreation,
message=("Failed in creating server: The 'port' property and the "
"'fixed_ip' property cannot be specified at the same "
"time."))),
('update:net-y:port-y:fixed_ip-n', dict(
reason='update',
success=True,
inputs={'network': 'NET', 'port': 'PORT'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'port': 'PORT_ID'},
exception=None,
message='')),
('update:net-y:port-n:fixed_ip-y', dict(
reason='update',
success=True,
inputs={'network': 'NET', 'fixed_ip': 'FIXED_IP'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID',
'fixed_ip': 'FIXED_IP'},
exception=None,
message='')),
('update:net-y:port-n:fixed_ip-n:sgroups-y', dict(
reason='create',
success=True,
inputs={'network': 'NET', 'security_groups': ['default']},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[mock.Mock(id='SG_ID')],
floating_result=[],
result={'network': 'NET_ID', 'security_groups': ['SG_ID']},
exception=None,
message='')),
('update:net-y:port-n:fixed_ip-n:sgroups-n:floating_net-y', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'floating_network': 'NET'},
net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'floating_network': 'NET_ID'},
exception=None,
message='')),
('update:net-f:port-y:fixed_ip-n', dict(
reason='update',
success=False,
inputs={'network': 'NET', 'port': 'PORT'},
net_result=[exc.InternalError(message='NET Failure')],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': NET Failure.")),
('update:net-n:port-f:fixed_ip-n', dict(
reason='update',
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[exc.InternalError(message='PORT Failure')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': PORT Failure.")),
('update:net-n:port-active:fixed_ip-n', dict(
reason='update',
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='ACTIVE')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceUpdate,
message=("Failed in updating server 'NOVA_ID': The status of the "
"port PORT must be DOWN."))),
('update:net-n:port-n:fixed_ip-n', dict(
reason='update',
success=False,
inputs={'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceUpdate,
message=("Failed in updating server 'NOVA_ID': One of 'port' "
"and 'network' must be provided."))),
('update:net-n:port-y:fixed_ip-y', dict(
reason='update',
success=False,
inputs={'port': 'PORT', 'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceUpdate,
message=("Failed in updating server 'NOVA_ID': The 'port' "
"property and the 'fixed_ip' property cannot be "
"specified at the same time."))),
]
def setUp(self):
super(TestNetworkValidation, self).setUp()
self.nc = mock.Mock()
self.profile = server.ServerProfile('t', spec)
self.profile._networkclient = self.nc
def test_validation(self):
self.nc.network_get.side_effect = self.net_result
self.nc.port_find.side_effect = self.port_result
self.nc.security_group_find.side_effect = self.sg_result
self.nc.floatingip_find.side_effect = self.floating_result
obj = mock.Mock(physical_id='NOVA_ID')
if self.success:
res = self.profile._validate_network(obj, self.inputs, self.reason)
self.assertEqual(self.result, res)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_network,
obj, self.inputs, self.reason)
self.assertEqual(self.message, six.text_type(ex))
if self.net_result:
self.nc.network_get.assert_called_with('NET')
if self.port_result:
self.nc.port_find.assert_called_once_with('PORT')
if self.sg_result:
self.nc.security_group_find.assert_called_once_with('default')
if self.floating_result:
self.nc.floatingip_find.assert_called_once_with('FLOATINGIP')
class TestNovaServerValidate(base.SenlinTestCase):
def setUp(self):
super(TestNovaServerValidate, self).setUp()
self.context = utils.dummy_context()
def test_do_validate_all_passed(self):
profile = server.ServerProfile('t', spec)
mock_az = self.patchobject(profile, '_validate_az')
mock_flavor = self.patchobject(profile, '_validate_flavor')
mock_image = self.patchobject(profile, '_validate_image')
mock_keypair = self.patchobject(profile, '_validate_keypair')
mock_network = self.patchobject(profile, '_validate_network')
obj = mock.Mock()
res = profile.do_validate(obj)
properties = spec['properties']
self.assertTrue(res)
mock_az.assert_called_once_with(obj, properties['availability_zone'])
mock_flavor.assert_called_once_with(obj, properties['flavor'])
mock_image.assert_called_once_with(obj, properties['image'])
mock_keypair.assert_called_once_with(obj, properties['key_name'])
mock_network.assert_called_once_with(obj, properties['networks'][0])
| 38.07906 | 79 | 0.537287 | ["Apache-2.0"] | BoTranVan/senlin | senlin/tests/unit/profiles/test_nova_server_validate.py | 35,642 | Python |