body_hash (string, length 64) | body (string, length 23-109k) | docstring (string, length 1-57k) | path (string, length 4-198) | name (string, length 1-115) | repository_name (string, length 7-111) | repository_stars (float64, 0-191k) | lang (string, 1 class) | body_without_docstring (string, length 14-108k) | unified (string, length 45-133k) |
---|---|---|---|---|---|---|---|---|---|
68b7cb9992dba3bd90edc7cd1c50a6890357d88409b441b8598c9ea2905a55e7 | @pytest.mark.parametrize('so', [2, 3, 4, 5])
def test_fd_indices(self, so):
'\n Test that shifted derivative have Integer offset after indexification.\n '
grid = Grid((10,))
x = grid.dimensions[0]
x0 = (x + (0.5 * x.spacing))
u = Function(name='u', grid=grid, space_order=so)
dx = indexify(u.dx(x0=x0).evaluate)
for f in retrieve_indexed(dx):
assert (len(f.indices[0].atoms(Float)) == 0) | Test that shifted derivative have Integer offset after indexification. | tests/test_derivatives.py | test_fd_indices | felipeaugustogudes/devito | 204 | python | @pytest.mark.parametrize('so', [2, 3, 4, 5])
def test_fd_indices(self, so):
'\n \n '
grid = Grid((10,))
x = grid.dimensions[0]
x0 = (x + (0.5 * x.spacing))
u = Function(name='u', grid=grid, space_order=so)
dx = indexify(u.dx(x0=x0).evaluate)
for f in retrieve_indexed(dx):
assert (len(f.indices[0].atoms(Float)) == 0) | @pytest.mark.parametrize('so', [2, 3, 4, 5])
def test_fd_indices(self, so):
'\n \n '
grid = Grid((10,))
x = grid.dimensions[0]
x0 = (x + (0.5 * x.spacing))
u = Function(name='u', grid=grid, space_order=so)
dx = indexify(u.dx(x0=x0).evaluate)
for f in retrieve_indexed(dx):
assert (len(f.indices[0].atoms(Float)) == 0)<|docstring|>Test that shifted derivative have Integer offset after indexification.<|endoftext|> |
71f61ff80801ce27fbf574bbae8fa455138ff8b4b51c4c7a7bde8b2dd5ca4fe8 | @pytest.mark.parametrize('SymbolType, dim', [(Function, x), (Function, y), (TimeFunction, x), (TimeFunction, y), (TimeFunction, t)])
def test_stencil_derivative(self, SymbolType, dim):
'Test symbolic behaviour when expanding stencil derivatives'
i = dim(self.grid)
u = SymbolType(name='u', grid=self.grid)
u.data[:] = 66.6
di = u.diff(i)
dii = u.diff(i, i)
assert (isinstance(di, Derivative) and isinstance(dii, Derivative))
s_di = di.as_finite_difference([(i - i.spacing), i])
s_dii = dii.as_finite_difference([(i - i.spacing), i, (i + i.spacing)])
assert ((len(s_di.args) == 2) and (len(s_dii.args) == 3))
u_di = s_di.args[0].args[1]
u_dii = s_di.args[0].args[1]
assert ((u_di.grid.shape == self.shape) and (u_dii.grid.shape == self.shape))
assert ((u_di.shape == u.shape) and (u_dii.shape == u.shape))
assert np.allclose(u_di.data, 66.6)
assert np.allclose(u_dii.data, 66.6) | Test symbolic behaviour when expanding stencil derivatives | tests/test_derivatives.py | test_stencil_derivative | felipeaugustogudes/devito | 204 | python | @pytest.mark.parametrize('SymbolType, dim', [(Function, x), (Function, y), (TimeFunction, x), (TimeFunction, y), (TimeFunction, t)])
def test_stencil_derivative(self, SymbolType, dim):
i = dim(self.grid)
u = SymbolType(name='u', grid=self.grid)
u.data[:] = 66.6
di = u.diff(i)
dii = u.diff(i, i)
assert (isinstance(di, Derivative) and isinstance(dii, Derivative))
s_di = di.as_finite_difference([(i - i.spacing), i])
s_dii = dii.as_finite_difference([(i - i.spacing), i, (i + i.spacing)])
assert ((len(s_di.args) == 2) and (len(s_dii.args) == 3))
u_di = s_di.args[0].args[1]
u_dii = s_di.args[0].args[1]
assert ((u_di.grid.shape == self.shape) and (u_dii.grid.shape == self.shape))
assert ((u_di.shape == u.shape) and (u_dii.shape == u.shape))
assert np.allclose(u_di.data, 66.6)
assert np.allclose(u_dii.data, 66.6) | @pytest.mark.parametrize('SymbolType, dim', [(Function, x), (Function, y), (TimeFunction, x), (TimeFunction, y), (TimeFunction, t)])
def test_stencil_derivative(self, SymbolType, dim):
i = dim(self.grid)
u = SymbolType(name='u', grid=self.grid)
u.data[:] = 66.6
di = u.diff(i)
dii = u.diff(i, i)
assert (isinstance(di, Derivative) and isinstance(dii, Derivative))
s_di = di.as_finite_difference([(i - i.spacing), i])
s_dii = dii.as_finite_difference([(i - i.spacing), i, (i + i.spacing)])
assert ((len(s_di.args) == 2) and (len(s_dii.args) == 3))
u_di = s_di.args[0].args[1]
u_dii = s_di.args[0].args[1]
assert ((u_di.grid.shape == self.shape) and (u_dii.grid.shape == self.shape))
assert ((u_di.shape == u.shape) and (u_dii.shape == u.shape))
assert np.allclose(u_di.data, 66.6)
assert np.allclose(u_dii.data, 66.6)<|docstring|>Test symbolic behaviour when expanding stencil derivatives<|endoftext|> |
d8670bbd20435c342075e455859b0ecadf578b874cc4f06174f1ef48e54fd960 | @pytest.mark.parametrize('SymbolType, derivative, dim', [(Function, 'dx2', 3), (Function, 'dy2', 3), (TimeFunction, 'dx2', 3), (TimeFunction, 'dy2', 3), (TimeFunction, 'dt', 2)])
def test_preformed_derivatives(self, SymbolType, derivative, dim):
'Test the stencil expressions provided by devito objects'
u = SymbolType(name='u', grid=self.grid, time_order=2, space_order=2)
expr = getattr(u, derivative)
assert (len(expr.evaluate.args) == dim) | Test the stencil expressions provided by devito objects | tests/test_derivatives.py | test_preformed_derivatives | felipeaugustogudes/devito | 204 | python | @pytest.mark.parametrize('SymbolType, derivative, dim', [(Function, 'dx2', 3), (Function, 'dy2', 3), (TimeFunction, 'dx2', 3), (TimeFunction, 'dy2', 3), (TimeFunction, 'dt', 2)])
def test_preformed_derivatives(self, SymbolType, derivative, dim):
u = SymbolType(name='u', grid=self.grid, time_order=2, space_order=2)
expr = getattr(u, derivative)
assert (len(expr.evaluate.args) == dim) | @pytest.mark.parametrize('SymbolType, derivative, dim', [(Function, 'dx2', 3), (Function, 'dy2', 3), (TimeFunction, 'dx2', 3), (TimeFunction, 'dy2', 3), (TimeFunction, 'dt', 2)])
def test_preformed_derivatives(self, SymbolType, derivative, dim):
u = SymbolType(name='u', grid=self.grid, time_order=2, space_order=2)
expr = getattr(u, derivative)
assert (len(expr.evaluate.args) == dim)<|docstring|>Test the stencil expressions provided by devito objects<|endoftext|> |
c1ed21a70b0729f05c8184d6a80c4e6a57fcaf4dd89821fb1588d6c0525542fa | @pytest.mark.parametrize('derivative, dim', [('dx', x), ('dy', y), ('dz', z)])
@pytest.mark.parametrize('order', [1, 2, 4, 6, 8, 10, 12, 14, 16])
def test_derivatives_space(self, derivative, dim, order):
'Test first derivative expressions against native sympy'
dim = dim(self.grid)
u = TimeFunction(name='u', grid=self.grid, time_order=2, space_order=order)
expr = getattr(u, derivative).evaluate
width = int((order / 2))
if (order <= 2):
indices = [dim, (dim + dim.spacing)]
else:
indices = [(dim + (i * dim.spacing)) for i in range((- width), (width + 1))]
s_expr = u.diff(dim).as_finite_difference(indices).evalf(_PRECISION)
assert (simplify((expr - s_expr)) == 0)
assert (type(expr) == EvalDerivative)
expr1 = s_expr.func(*expr.args)
assert (expr1 == s_expr) | Test first derivative expressions against native sympy | tests/test_derivatives.py | test_derivatives_space | felipeaugustogudes/devito | 204 | python | @pytest.mark.parametrize('derivative, dim', [('dx', x), ('dy', y), ('dz', z)])
@pytest.mark.parametrize('order', [1, 2, 4, 6, 8, 10, 12, 14, 16])
def test_derivatives_space(self, derivative, dim, order):
dim = dim(self.grid)
u = TimeFunction(name='u', grid=self.grid, time_order=2, space_order=order)
expr = getattr(u, derivative).evaluate
width = int((order / 2))
if (order <= 2):
indices = [dim, (dim + dim.spacing)]
else:
indices = [(dim + (i * dim.spacing)) for i in range((- width), (width + 1))]
s_expr = u.diff(dim).as_finite_difference(indices).evalf(_PRECISION)
assert (simplify((expr - s_expr)) == 0)
assert (type(expr) == EvalDerivative)
expr1 = s_expr.func(*expr.args)
assert (expr1 == s_expr) | @pytest.mark.parametrize('derivative, dim', [('dx', x), ('dy', y), ('dz', z)])
@pytest.mark.parametrize('order', [1, 2, 4, 6, 8, 10, 12, 14, 16])
def test_derivatives_space(self, derivative, dim, order):
dim = dim(self.grid)
u = TimeFunction(name='u', grid=self.grid, time_order=2, space_order=order)
expr = getattr(u, derivative).evaluate
width = int((order / 2))
if (order <= 2):
indices = [dim, (dim + dim.spacing)]
else:
indices = [(dim + (i * dim.spacing)) for i in range((- width), (width + 1))]
s_expr = u.diff(dim).as_finite_difference(indices).evalf(_PRECISION)
assert (simplify((expr - s_expr)) == 0)
assert (type(expr) == EvalDerivative)
expr1 = s_expr.func(*expr.args)
assert (expr1 == s_expr)<|docstring|>Test first derivative expressions against native sympy<|endoftext|> |
221fe578d64b2e6227a1fa324912cbdc92cb729143a0bcab8557056223ffcd44 | @pytest.mark.parametrize('derivative, dim', [('dx2', x), ('dy2', y), ('dz2', z)])
@pytest.mark.parametrize('order', [2, 4, 6, 8, 10, 12, 14, 16])
def test_second_derivatives_space(self, derivative, dim, order):
'\n Test second derivative expressions against native sympy.\n '
dim = dim(self.grid)
u = TimeFunction(name='u', grid=self.grid, time_order=2, space_order=order)
expr = getattr(u, derivative).evaluate
width = int((order / 2))
indices = [(dim + (i * dim.spacing)) for i in range((- width), (width + 1))]
s_expr = u.diff(dim, dim).as_finite_difference(indices).evalf(_PRECISION)
assert (simplify((expr - s_expr)) == 0)
assert (type(expr) == EvalDerivative)
expr1 = s_expr.func(*expr.args)
assert (expr1 == s_expr) | Test second derivative expressions against native sympy. | tests/test_derivatives.py | test_second_derivatives_space | felipeaugustogudes/devito | 204 | python | @pytest.mark.parametrize('derivative, dim', [('dx2', x), ('dy2', y), ('dz2', z)])
@pytest.mark.parametrize('order', [2, 4, 6, 8, 10, 12, 14, 16])
def test_second_derivatives_space(self, derivative, dim, order):
'\n \n '
dim = dim(self.grid)
u = TimeFunction(name='u', grid=self.grid, time_order=2, space_order=order)
expr = getattr(u, derivative).evaluate
width = int((order / 2))
indices = [(dim + (i * dim.spacing)) for i in range((- width), (width + 1))]
s_expr = u.diff(dim, dim).as_finite_difference(indices).evalf(_PRECISION)
assert (simplify((expr - s_expr)) == 0)
assert (type(expr) == EvalDerivative)
expr1 = s_expr.func(*expr.args)
assert (expr1 == s_expr) | @pytest.mark.parametrize('derivative, dim', [('dx2', x), ('dy2', y), ('dz2', z)])
@pytest.mark.parametrize('order', [2, 4, 6, 8, 10, 12, 14, 16])
def test_second_derivatives_space(self, derivative, dim, order):
'\n \n '
dim = dim(self.grid)
u = TimeFunction(name='u', grid=self.grid, time_order=2, space_order=order)
expr = getattr(u, derivative).evaluate
width = int((order / 2))
indices = [(dim + (i * dim.spacing)) for i in range((- width), (width + 1))]
s_expr = u.diff(dim, dim).as_finite_difference(indices).evalf(_PRECISION)
assert (simplify((expr - s_expr)) == 0)
assert (type(expr) == EvalDerivative)
expr1 = s_expr.func(*expr.args)
assert (expr1 == s_expr)<|docstring|>Test second derivative expressions against native sympy.<|endoftext|> |
830c753f63a905185ef08ce0c9010dfba8df7c412b561c0032a0c37c694b02d2 | @pytest.mark.parametrize('space_order', [2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
@pytest.mark.parametrize('derivative', ['dx', 'dxl', 'dxr', 'dx2'])
def test_fd_space(self, derivative, space_order):
'\n This test compares the discrete finite-difference scheme against polynomials\n For a given order p, the finite difference scheme should\n be exact for polynomials of order p.\n '
nx = 100
xx = np.linspace((- 1), 1, nx)
dx = (xx[1] - xx[0])
grid = Grid(shape=(nx,), dtype=np.float32)
x = grid.dimensions[0]
u = Function(name='u', grid=grid, space_order=space_order)
du = Function(name='du', grid=grid, space_order=space_order)
coeffs = np.ones((space_order,), dtype=np.float32)
polynome = sum([(coeffs[i] * (x ** i)) for i in range(0, space_order)])
polyvalues = np.array([polynome.subs(x, xi) for xi in xx], np.float32)
u.data[:] = polyvalues
Dpolynome = (diff(diff(polynome)) if (derivative == 'dx2') else diff(polynome))
Dpolyvalues = np.array([Dpolynome.subs(x, xi) for xi in xx], np.float32)
u_deriv = getattr(u, derivative)
stencil = Eq(du, u_deriv)
op = Operator(stencil, subs={x.spacing: dx})
op.apply()
space_border = space_order
error = abs((du.data[space_border:(- space_border)] - Dpolyvalues[space_border:(- space_border)]))
assert np.isclose(np.mean(error), 0.0, atol=0.001) | This test compares the discrete finite-difference scheme against polynomials
For a given order p, the finite difference scheme should
be exact for polynomials of order p. | tests/test_derivatives.py | test_fd_space | felipeaugustogudes/devito | 204 | python | @pytest.mark.parametrize('space_order', [2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
@pytest.mark.parametrize('derivative', ['dx', 'dxl', 'dxr', 'dx2'])
def test_fd_space(self, derivative, space_order):
'\n This test compares the discrete finite-difference scheme against polynomials\n For a given order p, the finite difference scheme should\n be exact for polynomials of order p.\n '
nx = 100
xx = np.linspace((- 1), 1, nx)
dx = (xx[1] - xx[0])
grid = Grid(shape=(nx,), dtype=np.float32)
x = grid.dimensions[0]
u = Function(name='u', grid=grid, space_order=space_order)
du = Function(name='du', grid=grid, space_order=space_order)
coeffs = np.ones((space_order,), dtype=np.float32)
polynome = sum([(coeffs[i] * (x ** i)) for i in range(0, space_order)])
polyvalues = np.array([polynome.subs(x, xi) for xi in xx], np.float32)
u.data[:] = polyvalues
Dpolynome = (diff(diff(polynome)) if (derivative == 'dx2') else diff(polynome))
Dpolyvalues = np.array([Dpolynome.subs(x, xi) for xi in xx], np.float32)
u_deriv = getattr(u, derivative)
stencil = Eq(du, u_deriv)
op = Operator(stencil, subs={x.spacing: dx})
op.apply()
space_border = space_order
error = abs((du.data[space_border:(- space_border)] - Dpolyvalues[space_border:(- space_border)]))
assert np.isclose(np.mean(error), 0.0, atol=0.001) | @pytest.mark.parametrize('space_order', [2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
@pytest.mark.parametrize('derivative', ['dx', 'dxl', 'dxr', 'dx2'])
def test_fd_space(self, derivative, space_order):
'\n This test compares the discrete finite-difference scheme against polynomials\n For a given order p, the finite difference scheme should\n be exact for polynomials of order p.\n '
nx = 100
xx = np.linspace((- 1), 1, nx)
dx = (xx[1] - xx[0])
grid = Grid(shape=(nx,), dtype=np.float32)
x = grid.dimensions[0]
u = Function(name='u', grid=grid, space_order=space_order)
du = Function(name='du', grid=grid, space_order=space_order)
coeffs = np.ones((space_order,), dtype=np.float32)
polynome = sum([(coeffs[i] * (x ** i)) for i in range(0, space_order)])
polyvalues = np.array([polynome.subs(x, xi) for xi in xx], np.float32)
u.data[:] = polyvalues
Dpolynome = (diff(diff(polynome)) if (derivative == 'dx2') else diff(polynome))
Dpolyvalues = np.array([Dpolynome.subs(x, xi) for xi in xx], np.float32)
u_deriv = getattr(u, derivative)
stencil = Eq(du, u_deriv)
op = Operator(stencil, subs={x.spacing: dx})
op.apply()
space_border = space_order
error = abs((du.data[space_border:(- space_border)] - Dpolyvalues[space_border:(- space_border)]))
assert np.isclose(np.mean(error), 0.0, atol=0.001)<|docstring|>This test compares the discrete finite-difference scheme against polynomials
For a given order p, the finite difference scheme should
be exact for polynomials of order p.<|endoftext|> |
dd023cfefaf520be94d98f23a0ed1f46b080bfad0640f466f0f0ef94147477f0 | @pytest.mark.parametrize('space_order', [2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
@pytest.mark.parametrize('stagger', [centered, left, right])
def test_fd_space_staggered(self, space_order, stagger):
'\n This test compares the discrete finite-difference scheme against polynomials\n For a given order p, the finite difference scheme should\n be exact for polynomials of order p\n '
nx = 101
xx = np.linspace((- 1), 1, nx)
dx = (xx[1] - xx[0])
grid = Grid(shape=(nx,), dtype=np.float32)
x = grid.dimensions[0]
if (stagger == left):
off = (- 0.5)
side = (- x)
xx2 = (xx + (off * dx))
elif (stagger == right):
off = 0.5
side = x
xx2 = (xx + (off * dx))
else:
side = NODE
xx2 = xx
u = Function(name='u', grid=grid, space_order=space_order, staggered=side)
du = Function(name='du', grid=grid, space_order=space_order, staggered=side)
coeffs = np.ones(((space_order - 1),), dtype=np.float32)
polynome = sum([(coeffs[i] * (x ** i)) for i in range(0, (space_order - 1))])
polyvalues = np.array([polynome.subs(x, xi) for xi in xx2], np.float32)
u.data[:] = polyvalues
Dpolynome = diff(polynome)
Dpolyvalues = np.array([Dpolynome.subs(x, xi) for xi in xx2], np.float32)
stencil = Eq(du, u.dx)
op = Operator(stencil, subs={x.spacing: dx})
op.apply()
space_border = space_order
error = abs((du.data[space_border:(- space_border)] - Dpolyvalues[space_border:(- space_border)]))
assert np.isclose(np.mean(error), 0.0, atol=0.001) | This test compares the discrete finite-difference scheme against polynomials
For a given order p, the finite difference scheme should
be exact for polynomials of order p | tests/test_derivatives.py | test_fd_space_staggered | felipeaugustogudes/devito | 204 | python | @pytest.mark.parametrize('space_order', [2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
@pytest.mark.parametrize('stagger', [centered, left, right])
def test_fd_space_staggered(self, space_order, stagger):
'\n This test compares the discrete finite-difference scheme against polynomials\n For a given order p, the finite difference scheme should\n be exact for polynomials of order p\n '
nx = 101
xx = np.linspace((- 1), 1, nx)
dx = (xx[1] - xx[0])
grid = Grid(shape=(nx,), dtype=np.float32)
x = grid.dimensions[0]
if (stagger == left):
off = (- 0.5)
side = (- x)
xx2 = (xx + (off * dx))
elif (stagger == right):
off = 0.5
side = x
xx2 = (xx + (off * dx))
else:
side = NODE
xx2 = xx
u = Function(name='u', grid=grid, space_order=space_order, staggered=side)
du = Function(name='du', grid=grid, space_order=space_order, staggered=side)
coeffs = np.ones(((space_order - 1),), dtype=np.float32)
polynome = sum([(coeffs[i] * (x ** i)) for i in range(0, (space_order - 1))])
polyvalues = np.array([polynome.subs(x, xi) for xi in xx2], np.float32)
u.data[:] = polyvalues
Dpolynome = diff(polynome)
Dpolyvalues = np.array([Dpolynome.subs(x, xi) for xi in xx2], np.float32)
stencil = Eq(du, u.dx)
op = Operator(stencil, subs={x.spacing: dx})
op.apply()
space_border = space_order
error = abs((du.data[space_border:(- space_border)] - Dpolyvalues[space_border:(- space_border)]))
assert np.isclose(np.mean(error), 0.0, atol=0.001) | @pytest.mark.parametrize('space_order', [2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
@pytest.mark.parametrize('stagger', [centered, left, right])
def test_fd_space_staggered(self, space_order, stagger):
'\n This test compares the discrete finite-difference scheme against polynomials\n For a given order p, the finite difference scheme should\n be exact for polynomials of order p\n '
nx = 101
xx = np.linspace((- 1), 1, nx)
dx = (xx[1] - xx[0])
grid = Grid(shape=(nx,), dtype=np.float32)
x = grid.dimensions[0]
if (stagger == left):
off = (- 0.5)
side = (- x)
xx2 = (xx + (off * dx))
elif (stagger == right):
off = 0.5
side = x
xx2 = (xx + (off * dx))
else:
side = NODE
xx2 = xx
u = Function(name='u', grid=grid, space_order=space_order, staggered=side)
du = Function(name='du', grid=grid, space_order=space_order, staggered=side)
coeffs = np.ones(((space_order - 1),), dtype=np.float32)
polynome = sum([(coeffs[i] * (x ** i)) for i in range(0, (space_order - 1))])
polyvalues = np.array([polynome.subs(x, xi) for xi in xx2], np.float32)
u.data[:] = polyvalues
Dpolynome = diff(polynome)
Dpolyvalues = np.array([Dpolynome.subs(x, xi) for xi in xx2], np.float32)
stencil = Eq(du, u.dx)
op = Operator(stencil, subs={x.spacing: dx})
op.apply()
space_border = space_order
error = abs((du.data[space_border:(- space_border)] - Dpolyvalues[space_border:(- space_border)]))
assert np.isclose(np.mean(error), 0.0, atol=0.001)<|docstring|>This test compares the discrete finite-difference scheme against polynomials
For a given order p, the finite difference scheme should
be exact for polynomials of order p<|endoftext|> |
c26b3c3620dcdfe22900ed0df8591cd84791d9532a3ae194855a588cc1da194f | def test_new_x0_eval_at(self):
'\n Make sure that explicitly set x0 does not get overwritten by eval_at.\n '
grid = Grid((10,))
x = grid.dimensions[0]
u = Function(name='u', grid=grid, space_order=2)
v = Function(name='v', grid=grid, space_order=2)
assert (u.dx(x0=(x - (x.spacing / 2)))._eval_at(v).x0 == {x: (x - (x.spacing / 2))}) | Make sure that explicitly set x0 does not get overwritten by eval_at. | tests/test_derivatives.py | test_new_x0_eval_at | felipeaugustogudes/devito | 204 | python | def test_new_x0_eval_at(self):
'\n \n '
grid = Grid((10,))
x = grid.dimensions[0]
u = Function(name='u', grid=grid, space_order=2)
v = Function(name='v', grid=grid, space_order=2)
assert (u.dx(x0=(x - (x.spacing / 2)))._eval_at(v).x0 == {x: (x - (x.spacing / 2))}) | def test_new_x0_eval_at(self):
'\n \n '
grid = Grid((10,))
x = grid.dimensions[0]
u = Function(name='u', grid=grid, space_order=2)
v = Function(name='v', grid=grid, space_order=2)
assert (u.dx(x0=(x - (x.spacing / 2)))._eval_at(v).x0 == {x: (x - (x.spacing / 2))})<|docstring|>Make sure that explicitly set x0 does not get overwritten by eval_at.<|endoftext|> |
df8e2897b168f4dd74aa9fc8e3b1a44a69a01810b6d3dc363110efba3b6bff70 | def test_subsampled_fd(self):
'\n Test that the symbolic interface is working for space subsampled\n functions.\n '
nt = 19
grid = Grid(shape=(12, 12), extent=(11, 11))
u = TimeFunction(name='u', grid=grid, save=nt, space_order=2)
assert (grid.time_dim in u.indices)
dims = tuple([ConditionalDimension((d.name + 'sub'), parent=d, factor=2) for d in u.grid.dimensions])
grid2 = Grid((6, 6), dimensions=dims, extent=(10, 10))
u2 = TimeFunction(name='u2', grid=grid2, save=nt, space_order=1)
for i in range(nt):
for j in range(u2.data_with_halo.shape[2]):
u2.data_with_halo[(i, :, j)] = np.arange(u2.data_with_halo.shape[2])
eqns = [Eq(u.forward, (u + 1.0)), Eq(u2.forward, u2.dx)]
op = Operator(eqns)
op.apply(time_M=(nt - 2))
assert np.allclose(u.data[(- 1)], (nt - 1))
assert np.allclose(u2.data[1], 0.5) | Test that the symbolic interface is working for space subsampled
functions. | tests/test_derivatives.py | test_subsampled_fd | felipeaugustogudes/devito | 204 | python | def test_subsampled_fd(self):
'\n Test that the symbolic interface is working for space subsampled\n functions.\n '
nt = 19
grid = Grid(shape=(12, 12), extent=(11, 11))
u = TimeFunction(name='u', grid=grid, save=nt, space_order=2)
assert (grid.time_dim in u.indices)
dims = tuple([ConditionalDimension((d.name + 'sub'), parent=d, factor=2) for d in u.grid.dimensions])
grid2 = Grid((6, 6), dimensions=dims, extent=(10, 10))
u2 = TimeFunction(name='u2', grid=grid2, save=nt, space_order=1)
for i in range(nt):
for j in range(u2.data_with_halo.shape[2]):
u2.data_with_halo[(i, :, j)] = np.arange(u2.data_with_halo.shape[2])
eqns = [Eq(u.forward, (u + 1.0)), Eq(u2.forward, u2.dx)]
op = Operator(eqns)
op.apply(time_M=(nt - 2))
assert np.allclose(u.data[(- 1)], (nt - 1))
assert np.allclose(u2.data[1], 0.5) | def test_subsampled_fd(self):
'\n Test that the symbolic interface is working for space subsampled\n functions.\n '
nt = 19
grid = Grid(shape=(12, 12), extent=(11, 11))
u = TimeFunction(name='u', grid=grid, save=nt, space_order=2)
assert (grid.time_dim in u.indices)
dims = tuple([ConditionalDimension((d.name + 'sub'), parent=d, factor=2) for d in u.grid.dimensions])
grid2 = Grid((6, 6), dimensions=dims, extent=(10, 10))
u2 = TimeFunction(name='u2', grid=grid2, save=nt, space_order=1)
for i in range(nt):
for j in range(u2.data_with_halo.shape[2]):
u2.data_with_halo[(i, :, j)] = np.arange(u2.data_with_halo.shape[2])
eqns = [Eq(u.forward, (u + 1.0)), Eq(u2.forward, u2.dx)]
op = Operator(eqns)
op.apply(time_M=(nt - 2))
assert np.allclose(u.data[(- 1)], (nt - 1))
assert np.allclose(u2.data[1], 0.5)<|docstring|>Test that the symbolic interface is working for space subsampled
functions.<|endoftext|> |
908e58961bb5734e1d3e27562098aea949856664944d9e96f666ff9daedc478f | @pytest.mark.parametrize('so', [2, 5, 8])
def test_all_shortcuts(self, so):
'\n Test that verify that all fd shortcuts are functional.\n '
grid = Grid(shape=(10, 10, 10))
f = Function(name='f', grid=grid, space_order=so)
g = TimeFunction(name='g', grid=grid, space_order=so)
for fd in f._fd:
assert getattr(f, fd)
for fd in g._fd:
assert getattr(g, fd) | Test that verify that all fd shortcuts are functional. | tests/test_derivatives.py | test_all_shortcuts | felipeaugustogudes/devito | 204 | python | @pytest.mark.parametrize('so', [2, 5, 8])
def test_all_shortcuts(self, so):
'\n \n '
grid = Grid(shape=(10, 10, 10))
f = Function(name='f', grid=grid, space_order=so)
g = TimeFunction(name='g', grid=grid, space_order=so)
for fd in f._fd:
assert getattr(f, fd)
for fd in g._fd:
assert getattr(g, fd) | @pytest.mark.parametrize('so', [2, 5, 8])
def test_all_shortcuts(self, so):
'\n \n '
grid = Grid(shape=(10, 10, 10))
f = Function(name='f', grid=grid, space_order=so)
g = TimeFunction(name='g', grid=grid, space_order=so)
for fd in f._fd:
assert getattr(f, fd)
for fd in g._fd:
assert getattr(g, fd)<|docstring|>Test that verify that all fd shortcuts are functional.<|endoftext|> |
f1c690b3eb83afd810d6d3b9fc4bde537c26969c3a065e6c0b0d744933c17f1d | def __init__(self, resp, decode_type='utf-8'):
'\n get data from ResultSet\n '
self._decode_type = decode_type
self._resp = resp
self._data_set_wrapper = None
if (self._resp.data is not None):
self._data_set_wrapper = DataSetWrapper(resp.data, self._decode_type) | get data from ResultSet | python/nebula2/data/ResultSet.py | __init__ | taeb3/nebula-clients | 15 | python | def __init__(self, resp, decode_type='utf-8'):
'\n \n '
self._decode_type = decode_type
self._resp = resp
self._data_set_wrapper = None
if (self._resp.data is not None):
self._data_set_wrapper = DataSetWrapper(resp.data, self._decode_type) | def __init__(self, resp, decode_type='utf-8'):
'\n \n '
self._decode_type = decode_type
self._resp = resp
self._data_set_wrapper = None
if (self._resp.data is not None):
self._data_set_wrapper = DataSetWrapper(resp.data, self._decode_type)<|docstring|>get data from ResultSet<|endoftext|> |
57d324efdf7d560826691846caef381f6068987464f1fd7ad8735aff5328590a | def latency(self):
'\n unit us\n '
return self._resp.latency_in_us | unit us | python/nebula2/data/ResultSet.py | latency | taeb3/nebula-clients | 15 | python | def latency(self):
'\n \n '
return self._resp.latency_in_us | def latency(self):
'\n \n '
return self._resp.latency_in_us<|docstring|>unit us<|endoftext|> |
be92a8129d8e910759ce91072a60d073408b9bb83d47230280cb221d01e52d5f | def keys(self):
'\n get colNames\n '
if (self._data_set_wrapper is None):
return []
return self._data_set_wrapper.get_col_names() | get colNames | python/nebula2/data/ResultSet.py | keys | taeb3/nebula-clients | 15 | python | def keys(self):
'\n \n '
if (self._data_set_wrapper is None):
return []
return self._data_set_wrapper.get_col_names() | def keys(self):
'\n \n '
if (self._data_set_wrapper is None):
return []
return self._data_set_wrapper.get_col_names()<|docstring|>get colNames<|endoftext|> |
e52423be80aa647a92495f1c19a51b7901d3a89fe2cda7f5578ff018ea70d045 | def row_size(self):
'\n get one row size\n '
if (self._data_set_wrapper is None):
return 0
return len(self._data_set_wrapper.get_rows()) | get one row size | python/nebula2/data/ResultSet.py | row_size | taeb3/nebula-clients | 15 | python | def row_size(self):
'\n \n '
if (self._data_set_wrapper is None):
return 0
return len(self._data_set_wrapper.get_rows()) | def row_size(self):
'\n \n '
if (self._data_set_wrapper is None):
return 0
return len(self._data_set_wrapper.get_rows())<|docstring|>get one row size<|endoftext|> |
d95d64110d217f5445e726ce9541cedfaa0cb7473b566e455e6cbde6a537e22f | def col_size(self):
'\n get one col size\n '
if (self._data_set_wrapper is None):
return 0
return len(self._data_set_wrapper.get_col_names()) | get one col size | python/nebula2/data/ResultSet.py | col_size | taeb3/nebula-clients | 15 | python | def col_size(self):
'\n \n '
if (self._data_set_wrapper is None):
return 0
return len(self._data_set_wrapper.get_col_names()) | def col_size(self):
'\n \n '
if (self._data_set_wrapper is None):
return 0
return len(self._data_set_wrapper.get_col_names())<|docstring|>get one col size<|endoftext|> |
dbdc9cccb93da470c5c29f7fcac6f8e8f91e5705dde127b5c654a52c526736ac | def get_row_types(self):
'\n Get row types\n :param empty\n :return: list<int>\n ttypes.Value.__EMPTY__ = 0\n ttypes.Value.NVAL = 1\n ttypes.Value.BVAL = 2\n ttypes.Value.IVAL = 3\n ttypes.Value.FVAL = 4\n ttypes.Value.SVAL = 5\n ttypes.Value.DVAL = 6\n ttypes.Value.TVAL = 7\n ttypes.Value.DTVAL = 8\n ttypes.Value.VVAL = 9\n ttypes.Value.EVAL = 10\n ttypes.Value.PVAL = 11\n ttypes.Value.LVAL = 12\n ttypes.Value.MVAL = 13\n ttypes.Value.UVAL = 14\n ttypes.Value.GVAL = 15\n '
if (self._data_set_wrapper is None):
return []
return self._data_set_wrapper.get_row_types() | Get row types
:param empty
:return: list<int>
ttypes.Value.__EMPTY__ = 0
ttypes.Value.NVAL = 1
ttypes.Value.BVAL = 2
ttypes.Value.IVAL = 3
ttypes.Value.FVAL = 4
ttypes.Value.SVAL = 5
ttypes.Value.DVAL = 6
ttypes.Value.TVAL = 7
ttypes.Value.DTVAL = 8
ttypes.Value.VVAL = 9
ttypes.Value.EVAL = 10
ttypes.Value.PVAL = 11
ttypes.Value.LVAL = 12
ttypes.Value.MVAL = 13
ttypes.Value.UVAL = 14
ttypes.Value.GVAL = 15 | python/nebula2/data/ResultSet.py | get_row_types | taeb3/nebula-clients | 15 | python | def get_row_types(self):
'\n Get row types\n :param empty\n :return: list<int>\n ttypes.Value.__EMPTY__ = 0\n ttypes.Value.NVAL = 1\n ttypes.Value.BVAL = 2\n ttypes.Value.IVAL = 3\n ttypes.Value.FVAL = 4\n ttypes.Value.SVAL = 5\n ttypes.Value.DVAL = 6\n ttypes.Value.TVAL = 7\n ttypes.Value.DTVAL = 8\n ttypes.Value.VVAL = 9\n ttypes.Value.EVAL = 10\n ttypes.Value.PVAL = 11\n ttypes.Value.LVAL = 12\n ttypes.Value.MVAL = 13\n ttypes.Value.UVAL = 14\n ttypes.Value.GVAL = 15\n '
if (self._data_set_wrapper is None):
return []
return self._data_set_wrapper.get_row_types() | def get_row_types(self):
'\n Get row types\n :param empty\n :return: list<int>\n ttypes.Value.__EMPTY__ = 0\n ttypes.Value.NVAL = 1\n ttypes.Value.BVAL = 2\n ttypes.Value.IVAL = 3\n ttypes.Value.FVAL = 4\n ttypes.Value.SVAL = 5\n ttypes.Value.DVAL = 6\n ttypes.Value.TVAL = 7\n ttypes.Value.DTVAL = 8\n ttypes.Value.VVAL = 9\n ttypes.Value.EVAL = 10\n ttypes.Value.PVAL = 11\n ttypes.Value.LVAL = 12\n ttypes.Value.MVAL = 13\n ttypes.Value.UVAL = 14\n ttypes.Value.GVAL = 15\n '
if (self._data_set_wrapper is None):
return []
return self._data_set_wrapper.get_row_types()<|docstring|>Get row types
:param empty
:return: list<int>
ttypes.Value.__EMPTY__ = 0
ttypes.Value.NVAL = 1
ttypes.Value.BVAL = 2
ttypes.Value.IVAL = 3
ttypes.Value.FVAL = 4
ttypes.Value.SVAL = 5
ttypes.Value.DVAL = 6
ttypes.Value.TVAL = 7
ttypes.Value.DTVAL = 8
ttypes.Value.VVAL = 9
ttypes.Value.EVAL = 10
ttypes.Value.PVAL = 11
ttypes.Value.LVAL = 12
ttypes.Value.MVAL = 13
ttypes.Value.UVAL = 14
ttypes.Value.GVAL = 15<|endoftext|> |
72dfcff4fb50374735105fcf6646d905a23a7f98130c8305ab9369227bbb691b | def row_values(self, row_index):
'\n Get row values\n :param index: the Record index\n :return: list<ValueWrapper>\n '
if (self._data_set_wrapper is None):
return []
return self._data_set_wrapper.row_values(row_index) | Get row values
:param index: the Record index
:return: list<ValueWrapper> | python/nebula2/data/ResultSet.py | row_values | taeb3/nebula-clients | 15 | python | def row_values(self, row_index):
'\n Get row values\n :param index: the Record index\n :return: list<ValueWrapper>\n '
if (self._data_set_wrapper is None):
return []
return self._data_set_wrapper.row_values(row_index) | def row_values(self, row_index):
'\n Get row values\n :param index: the Record index\n :return: list<ValueWrapper>\n '
if (self._data_set_wrapper is None):
return []
return self._data_set_wrapper.row_values(row_index)<|docstring|>Get row values
:param index: the Record index
:return: list<ValueWrapper><|endoftext|> |
eb7d9d0fa1594eeb7f709256f06b6c9dc35924e432ff8b4994fb3e2910c20ed8 | def column_values(self, key):
'\n get column values\n :param key: the col name\n :return: list<ValueWrapper>\n '
if (self._data_set_wrapper is None):
return []
return self._data_set_wrapper.column_values(key) | get column values
:param key: the col name
:return: list<ValueWrapper> | python/nebula2/data/ResultSet.py | column_values | taeb3/nebula-clients | 15 | python | def column_values(self, key):
'\n get column values\n :param key: the col name\n :return: list<ValueWrapper>\n '
if (self._data_set_wrapper is None):
return []
return self._data_set_wrapper.column_values(key) | def column_values(self, key):
'\n get column values\n :param key: the col name\n :return: list<ValueWrapper>\n '
if (self._data_set_wrapper is None):
return []
return self._data_set_wrapper.column_values(key)<|docstring|>get column values
:param key: the col name
:return: list<ValueWrapper><|endoftext|> |
ff59138696e13017cfe5fc80a523f80ab96dab9826197512bbc47dbb33d924f8 | def rows(self):
'\n get all rows\n :param key: empty\n :return: list<Row>\n '
if (self._data_set_wrapper is None):
return []
return self._data_set_wrapper.get_rows() | get all rows
:param key: empty
:return: list<Row> | python/nebula2/data/ResultSet.py | rows | taeb3/nebula-clients | 15 | python | def rows(self):
'\n get all rows\n :param key: empty\n :return: list<Row>\n '
if (self._data_set_wrapper is None):
return []
return self._data_set_wrapper.get_rows() | def rows(self):
'\n get all rows\n :param key: empty\n :return: list<Row>\n '
if (self._data_set_wrapper is None):
return []
return self._data_set_wrapper.get_rows()<|docstring|>get all rows
:param key: empty
:return: list<Row><|endoftext|> |
ca9501c93273f4bcac82aa66d92c1399a86cb14e0a1cf9381d16243fd8b0a249 | @pytest.fixture
def clean_server(initialize_test_dir):
'\n Put testing server in a defined state: only minimal metadata (instruments)\n and records present\n '
config = json.load(open(SERVER_CONFIG_YAML, 'r'))
redproj = redcap.Project(config['api_url'], config['api_token'], lazy=True)
default_datadict = pd.DataFrame(data=[(['record_id', 'my_first_instrument', 'text', 'Record ID'] + ([''] * 14))], columns=map_header_csv_to_json)
redproj.import_metadata(default_datadict, format='csv')
redproj = redcap.Project(config['api_url'], config['api_token'], lazy=False)
default_records = pd.DataFrame(columns=['record_id', 'my_first_instrument_complete'])
redproj.import_records(default_records, format='csv', return_format='json', overwrite='overwrite') | Put testing server in a defined state: only minimal metadata (instruments)
and records present | redcap_bridge/test_redcap/test_server_interface.py | clean_server | killianrochet/DigLabTools | 2 | python | @pytest.fixture
def clean_server(initialize_test_dir):
'\n Put testing server in a defined state: only minimal metadata (instruments)\n and records present\n '
config = json.load(open(SERVER_CONFIG_YAML, 'r'))
redproj = redcap.Project(config['api_url'], config['api_token'], lazy=True)
default_datadict = pd.DataFrame(data=[(['record_id', 'my_first_instrument', 'text', 'Record ID'] + ([] * 14))], columns=map_header_csv_to_json)
redproj.import_metadata(default_datadict, format='csv')
redproj = redcap.Project(config['api_url'], config['api_token'], lazy=False)
default_records = pd.DataFrame(columns=['record_id', 'my_first_instrument_complete'])
redproj.import_records(default_records, format='csv', return_format='json', overwrite='overwrite') | @pytest.fixture
def clean_server(initialize_test_dir):
'\n Put testing server in a defined state: only minimal metadata (instruments)\n and records present\n '
config = json.load(open(SERVER_CONFIG_YAML, 'r'))
redproj = redcap.Project(config['api_url'], config['api_token'], lazy=True)
default_datadict = pd.DataFrame(data=[(['record_id', 'my_first_instrument', 'text', 'Record ID'] + ([] * 14))], columns=map_header_csv_to_json)
redproj.import_metadata(default_datadict, format='csv')
redproj = redcap.Project(config['api_url'], config['api_token'], lazy=False)
default_records = pd.DataFrame(columns=['record_id', 'my_first_instrument_complete'])
redproj.import_records(default_records, format='csv', return_format='json', overwrite='overwrite')<|docstring|>Put testing server in a defined state: only minimal metadata (instruments)
and records present<|endoftext|> |
032da9ff25c933bcb194df46adb74db4ac2fa56c3d0002e72f6060d87cb15e7a | def test_upload_datadict(clean_server, initialize_test_dir):
'\n Test uploading a survey definition (datadict) csv to the server\n '
metadata_csv = ((test_directory / 'testfiles') / 'metadata.csv')
res = upload_datadict(metadata_csv, SERVER_CONFIG_YAML)
with open(metadata_csv) as f:
lines = f.readlines()
exp = (len(lines) - 1)
assert (exp == res) | Test uploading a survey definition (datadict) csv to the server | redcap_bridge/test_redcap/test_server_interface.py | test_upload_datadict | killianrochet/DigLabTools | 2 | python | def test_upload_datadict(clean_server, initialize_test_dir):
'\n \n '
metadata_csv = ((test_directory / 'testfiles') / 'metadata.csv')
res = upload_datadict(metadata_csv, SERVER_CONFIG_YAML)
with open(metadata_csv) as f:
lines = f.readlines()
exp = (len(lines) - 1)
assert (exp == res) | def test_upload_datadict(clean_server, initialize_test_dir):
'\n \n '
metadata_csv = ((test_directory / 'testfiles') / 'metadata.csv')
res = upload_datadict(metadata_csv, SERVER_CONFIG_YAML)
with open(metadata_csv) as f:
lines = f.readlines()
exp = (len(lines) - 1)
assert (exp == res)<|docstring|>Test uploading a survey definition (datadict) csv to the server<|endoftext|> |
3d467f9c5a79a2eea9eb58b382f5bbc085713643c8e8a33db26156828125a8ef | def test_upload_records(clean_server, initialize_test_dir):
'\n Test upload of records to the server\n\n TODO: Finally this test should test the corresponding redcap_bridge\n `upload_records` method instead of pycap itself\n '
config = json.load(open(SERVER_CONFIG_YAML, 'r'))
redproj = redcap.Project(config['api_url'], config['api_token'], lazy=False)
upload_datadict(((test_directory / 'testfiles') / 'metadata.csv'), SERVER_CONFIG_YAML)
uploaded_records = pd.read_csv(((test_directory / 'testfiles') / 'record.csv'), index_col=0, dtype='str')
redproj.import_records(uploaded_records, format='csv', overwrite='overwrite') | Test upload of records to the server
TODO: Finally this test should test the corresponding redcap_bridge
`upload_records` method instead of pycap itself | redcap_bridge/test_redcap/test_server_interface.py | test_upload_records | killianrochet/DigLabTools | 2 | python | def test_upload_records(clean_server, initialize_test_dir):
'\n Test upload of records to the server\n\n TODO: Finally this test should test the corresponding redcap_bridge\n `upload_records` method instead of pycap itself\n '
config = json.load(open(SERVER_CONFIG_YAML, 'r'))
redproj = redcap.Project(config['api_url'], config['api_token'], lazy=False)
upload_datadict(((test_directory / 'testfiles') / 'metadata.csv'), SERVER_CONFIG_YAML)
uploaded_records = pd.read_csv(((test_directory / 'testfiles') / 'record.csv'), index_col=0, dtype='str')
redproj.import_records(uploaded_records, format='csv', overwrite='overwrite') | def test_upload_records(clean_server, initialize_test_dir):
'\n Test upload of records to the server\n\n TODO: Finally this test should test the corresponding redcap_bridge\n `upload_records` method instead of pycap itself\n '
config = json.load(open(SERVER_CONFIG_YAML, 'r'))
redproj = redcap.Project(config['api_url'], config['api_token'], lazy=False)
upload_datadict(((test_directory / 'testfiles') / 'metadata.csv'), SERVER_CONFIG_YAML)
uploaded_records = pd.read_csv(((test_directory / 'testfiles') / 'record.csv'), index_col=0, dtype='str')
redproj.import_records(uploaded_records, format='csv', overwrite='overwrite')<|docstring|>Test upload of records to the server
TODO: Finally this test should test the corresponding redcap_bridge
`upload_records` method instead of pycap itself<|endoftext|> |
2df0251ee9c2e0ace78e54584455d40820d04be852fcb831d6ccf8d3029743ec | def test_download_records(clean_server, initialize_test_dir):
'\n Download datadict from server and compare to previously uploaded datadict\n '
original_metadata_csv = ((test_directory / 'testfiles') / 'metadata.csv')
upload_datadict(original_metadata_csv, SERVER_CONFIG_YAML)
downloaded_metadata_csv = ((test_directory / 'testfiles') / 'metadata_downloaded.csv')
download_datadict(downloaded_metadata_csv, SERVER_CONFIG_YAML)
import csv
original_reader = csv.reader(open(original_metadata_csv))
download_reader = csv.reader(open(downloaded_metadata_csv))
original_header = original_reader.__next__()
downloaded_header = download_reader.__next__()
for (oh, dh) in zip(original_header, downloaded_header):
assert (map_header_csv_to_json[oh] == dh)
for (oline, dline) in zip(original_reader, download_reader):
assert (oline == dline) | Download datadict from server and compare to previously uploaded datadict | redcap_bridge/test_redcap/test_server_interface.py | test_download_records | killianrochet/DigLabTools | 2 | python | def test_download_records(clean_server, initialize_test_dir):
'\n \n '
original_metadata_csv = ((test_directory / 'testfiles') / 'metadata.csv')
upload_datadict(original_metadata_csv, SERVER_CONFIG_YAML)
downloaded_metadata_csv = ((test_directory / 'testfiles') / 'metadata_downloaded.csv')
download_datadict(downloaded_metadata_csv, SERVER_CONFIG_YAML)
import csv
original_reader = csv.reader(open(original_metadata_csv))
download_reader = csv.reader(open(downloaded_metadata_csv))
original_header = original_reader.__next__()
downloaded_header = download_reader.__next__()
for (oh, dh) in zip(original_header, downloaded_header):
assert (map_header_csv_to_json[oh] == dh)
for (oline, dline) in zip(original_reader, download_reader):
assert (oline == dline) | def test_download_records(clean_server, initialize_test_dir):
'\n \n '
original_metadata_csv = ((test_directory / 'testfiles') / 'metadata.csv')
upload_datadict(original_metadata_csv, SERVER_CONFIG_YAML)
downloaded_metadata_csv = ((test_directory / 'testfiles') / 'metadata_downloaded.csv')
download_datadict(downloaded_metadata_csv, SERVER_CONFIG_YAML)
import csv
original_reader = csv.reader(open(original_metadata_csv))
download_reader = csv.reader(open(downloaded_metadata_csv))
original_header = original_reader.__next__()
downloaded_header = download_reader.__next__()
for (oh, dh) in zip(original_header, downloaded_header):
assert (map_header_csv_to_json[oh] == dh)
for (oline, dline) in zip(original_reader, download_reader):
assert (oline == dline)<|docstring|>Download datadict from server and compare to previously uploaded datadict<|endoftext|> |
02a30c0926f14ec92a4be728c45318a0a9b5dbd18b049df233716d4487465230 | def get_initializer(initializer_seed=42.0):
'Creates a `tf.initializers.glorot_normal` with the given seed.\n Args:\n initializer_seed: int, initializer seed.\n Returns:\n GlorotNormal initializer with seed = `initializer_seed`.\n '
return tf.keras.initializers.GlorotNormal(seed=initializer_seed) | Creates a `tf.initializers.glorot_normal` with the given seed.
Args:
initializer_seed: int, initializer seed.
Returns:
GlorotNormal initializer with seed = `initializer_seed`. | models/vocoder.py | get_initializer | Z-yq/TensorflowTTS | 50 | python | def get_initializer(initializer_seed=42.0):
'Creates a `tf.initializers.glorot_normal` with the given seed.\n Args:\n initializer_seed: int, initializer seed.\n Returns:\n GlorotNormal initializer with seed = `initializer_seed`.\n '
return tf.keras.initializers.GlorotNormal(seed=initializer_seed) | def get_initializer(initializer_seed=42.0):
'Creates a `tf.initializers.glorot_normal` with the given seed.\n Args:\n initializer_seed: int, initializer seed.\n Returns:\n GlorotNormal initializer with seed = `initializer_seed`.\n '
return tf.keras.initializers.GlorotNormal(seed=initializer_seed)<|docstring|>Creates a `tf.initializers.glorot_normal` with the given seed.
Args:
initializer_seed: int, initializer seed.
Returns:
GlorotNormal initializer with seed = `initializer_seed`.<|endoftext|> |
660092e7f31d61db19e83683c73551a2f17f721505f4625317b5b4dd8e155e2a | def __init__(self, input_feature='raw', num_mels=80, out_channels=80, kernel_size=7, filters=1024, use_bias=True, hop_size=0.016, sample_rate=8000, stack_kernel_size=3, stacks=5, nonlinear_activation='LeakyReLU', nonlinear_activation_params={'alpha': 0.2}, padding_type='REFLECT', use_final_nolinear_activation=True, is_weight_norm=False, initializer_seed=42, **kwargs):
'Init parameters for MelGAN Generator model.'
if (input_feature == 'raw'):
assert (out_channels == num_mels), 'out_channels must equal num_mels while input_feature is "raw"'
hop_size = int((sample_rate * hop_size))
if (input_feature != 'raw'):
upsample_scales = self.get_scales((hop_size // out_channels))
else:
upsample_scales = ([1] * 3)
self.out_channels = out_channels
self.kernel_size = kernel_size
self.num_mels = num_mels
self.filters = filters
self.use_bias = use_bias
self.upsample_scales = upsample_scales
self.stack_kernel_size = stack_kernel_size
self.stacks = stacks
self.nonlinear_activation = nonlinear_activation
self.nonlinear_activation_params = nonlinear_activation_params
self.padding_type = padding_type
self.use_final_nolinear_activation = use_final_nolinear_activation
self.is_weight_norm = is_weight_norm
self.initializer_seed = initializer_seed | Init parameters for MelGAN Generator model. | models/vocoder.py | __init__ | Z-yq/TensorflowTTS | 50 | python | def __init__(self, input_feature='raw', num_mels=80, out_channels=80, kernel_size=7, filters=1024, use_bias=True, hop_size=0.016, sample_rate=8000, stack_kernel_size=3, stacks=5, nonlinear_activation='LeakyReLU', nonlinear_activation_params={'alpha': 0.2}, padding_type='REFLECT', use_final_nolinear_activation=True, is_weight_norm=False, initializer_seed=42, **kwargs):
if (input_feature == 'raw'):
assert (out_channels == num_mels), 'out_channels must equal num_mels while input_feature is "raw"'
hop_size = int((sample_rate * hop_size))
if (input_feature != 'raw'):
upsample_scales = self.get_scales((hop_size // out_channels))
else:
upsample_scales = ([1] * 3)
self.out_channels = out_channels
self.kernel_size = kernel_size
self.num_mels = num_mels
self.filters = filters
self.use_bias = use_bias
self.upsample_scales = upsample_scales
self.stack_kernel_size = stack_kernel_size
self.stacks = stacks
self.nonlinear_activation = nonlinear_activation
self.nonlinear_activation_params = nonlinear_activation_params
self.padding_type = padding_type
self.use_final_nolinear_activation = use_final_nolinear_activation
self.is_weight_norm = is_weight_norm
self.initializer_seed = initializer_seed | def __init__(self, input_feature='raw', num_mels=80, out_channels=80, kernel_size=7, filters=1024, use_bias=True, hop_size=0.016, sample_rate=8000, stack_kernel_size=3, stacks=5, nonlinear_activation='LeakyReLU', nonlinear_activation_params={'alpha': 0.2}, padding_type='REFLECT', use_final_nolinear_activation=True, is_weight_norm=False, initializer_seed=42, **kwargs):
if (input_feature == 'raw'):
assert (out_channels == num_mels), 'out_channels must equal num_mels while input_feature is "raw"'
hop_size = int((sample_rate * hop_size))
if (input_feature != 'raw'):
upsample_scales = self.get_scales((hop_size // out_channels))
else:
upsample_scales = ([1] * 3)
self.out_channels = out_channels
self.kernel_size = kernel_size
self.num_mels = num_mels
self.filters = filters
self.use_bias = use_bias
self.upsample_scales = upsample_scales
self.stack_kernel_size = stack_kernel_size
self.stacks = stacks
self.nonlinear_activation = nonlinear_activation
self.nonlinear_activation_params = nonlinear_activation_params
self.padding_type = padding_type
self.use_final_nolinear_activation = use_final_nolinear_activation
self.is_weight_norm = is_weight_norm
self.initializer_seed = initializer_seed<|docstring|>Init parameters for MelGAN Generator model.<|endoftext|> |
93de186098f39a1bceabf428567ca8f0f90ca7705711df569f00f8f7350531ff | def __init__(self, dis_out_channels=1, dis_scales=3, dis_downsample_pooling='AveragePooling1D', dis_downsample_pooling_params={'pool_size': 4, 'strides': 2}, dis_kernel_sizes=[5, 3], dis_filters=32, dis_max_downsample_filters=1024, dis_use_bias=True, dis_downsample_scales=[2, 2, 2, 2], dis_nonlinear_activation='LeakyReLU', dis_nonlinear_activation_params={'alpha': 0.2}, dis_padding_type='REFLECT', dis_is_weight_norm=True, dis_initializer_seed=42, **kwargs):
'Init parameters for MelGAN Discriminator model.'
self.out_channels = dis_out_channels
self.scales = dis_scales
self.downsample_pooling = dis_downsample_pooling
self.downsample_pooling_params = dis_downsample_pooling_params
self.kernel_sizes = dis_kernel_sizes
self.filters = dis_filters
self.max_downsample_filters = dis_max_downsample_filters
self.use_bias = dis_use_bias
self.downsample_scales = dis_downsample_scales
self.nonlinear_activation = dis_nonlinear_activation
self.nonlinear_activation_params = dis_nonlinear_activation_params
self.padding_type = dis_padding_type
self.is_weight_norm = dis_is_weight_norm
self.initializer_seed = dis_initializer_seed | Init parameters for MelGAN Discriminator model. | models/vocoder.py | __init__ | Z-yq/TensorflowTTS | 50 | python | def __init__(self, dis_out_channels=1, dis_scales=3, dis_downsample_pooling='AveragePooling1D', dis_downsample_pooling_params={'pool_size': 4, 'strides': 2}, dis_kernel_sizes=[5, 3], dis_filters=32, dis_max_downsample_filters=1024, dis_use_bias=True, dis_downsample_scales=[2, 2, 2, 2], dis_nonlinear_activation='LeakyReLU', dis_nonlinear_activation_params={'alpha': 0.2}, dis_padding_type='REFLECT', dis_is_weight_norm=True, dis_initializer_seed=42, **kwargs):
self.out_channels = dis_out_channels
self.scales = dis_scales
self.downsample_pooling = dis_downsample_pooling
self.downsample_pooling_params = dis_downsample_pooling_params
self.kernel_sizes = dis_kernel_sizes
self.filters = dis_filters
self.max_downsample_filters = dis_max_downsample_filters
self.use_bias = dis_use_bias
self.downsample_scales = dis_downsample_scales
self.nonlinear_activation = dis_nonlinear_activation
self.nonlinear_activation_params = dis_nonlinear_activation_params
self.padding_type = dis_padding_type
self.is_weight_norm = dis_is_weight_norm
self.initializer_seed = dis_initializer_seed | def __init__(self, dis_out_channels=1, dis_scales=3, dis_downsample_pooling='AveragePooling1D', dis_downsample_pooling_params={'pool_size': 4, 'strides': 2}, dis_kernel_sizes=[5, 3], dis_filters=32, dis_max_downsample_filters=1024, dis_use_bias=True, dis_downsample_scales=[2, 2, 2, 2], dis_nonlinear_activation='LeakyReLU', dis_nonlinear_activation_params={'alpha': 0.2}, dis_padding_type='REFLECT', dis_is_weight_norm=True, dis_initializer_seed=42, **kwargs):
self.out_channels = dis_out_channels
self.scales = dis_scales
self.downsample_pooling = dis_downsample_pooling
self.downsample_pooling_params = dis_downsample_pooling_params
self.kernel_sizes = dis_kernel_sizes
self.filters = dis_filters
self.max_downsample_filters = dis_max_downsample_filters
self.use_bias = dis_use_bias
self.downsample_scales = dis_downsample_scales
self.nonlinear_activation = dis_nonlinear_activation
self.nonlinear_activation_params = dis_nonlinear_activation_params
self.padding_type = dis_padding_type
self.is_weight_norm = dis_is_weight_norm
self.initializer_seed = dis_initializer_seed<|docstring|>Init parameters for MelGAN Discriminator model.<|endoftext|> |
6402d9fc5e9d175d6682df7898edc2885d829ac5958c18b8ee3307c50a64b625 | def __init__(self, config, **kwargs):
'Initialize TFMelGANGenerator module.\n Args:\n config: config object of Melgan generator.\n '
super(TFMultiWindowGenerator, self).__init__(**kwargs)
assert (config.filters >= np.prod(config.upsample_scales))
assert ((config.filters % (2 ** len(config.upsample_scales))) == 0)
self.config = config
layers = []
layers += [TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='first_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed))]
layers += [TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='second_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed)), TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='third_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed))]
for (i, upsample_scale) in enumerate(config.upsample_scales):
layers += [getattr(tf.keras.layers, config.nonlinear_activation)(**config.nonlinear_activation_params), TFConvTranspose1d(filters=(config.filters // (2 ** (i + 1))), kernel_size=(upsample_scale * 2), strides=upsample_scale, padding='same', is_weight_norm=config.is_weight_norm, initializer_seed=config.initializer_seed, name='conv_transpose_._{}'.format(i))]
for j in range(config.stacks):
layers += [TFResidualStack(kernel_size=config.stack_kernel_size, filters=max((config.filters // (2 ** (i + 1))), 128), dilation_rate=(config.stack_kernel_size ** j), use_bias=config.use_bias, nonlinear_activation=config.nonlinear_activation, nonlinear_activation_params=config.nonlinear_activation_params, is_weight_norm=config.is_weight_norm, initializer_seed=config.initializer_seed, name='residual_stack_._{}._._{}'.format(i, j))]
self.fc = tf.keras.layers.Dense(config.filters)
self.melgan_feature = tf.keras.Sequential(layers)
self.reshape1 = tf.keras.layers.Reshape([(- 1), ((self.config.window * config.filters) // 2)])
self.reshape2 = tf.keras.layers.Reshape([(- 1), (self.config.window * config.filters)])
self.reshape = tf.keras.layers.Reshape([(- 1), 1])
self.out_layer1 = tf.keras.layers.Conv1D(filters=self.config.out_channels, kernel_size=7, strides=1, padding='causal')
self.out_layer2 = tf.keras.layers.Dense(((self.config.window * self.config.out_channels) // 2))
self.out_layer3 = tf.keras.layers.Dense((self.config.window * self.config.out_channels))
self.out_c_score = tf.keras.layers.Conv1D(filters=3, kernel_size=41, padding='causal', activation='softmax') | Initialize TFMelGANGenerator module.
Args:
config: config object of Melgan generator. | models/vocoder.py | __init__ | Z-yq/TensorflowTTS | 50 | python | def __init__(self, config, **kwargs):
'Initialize TFMelGANGenerator module.\n Args:\n config: config object of Melgan generator.\n '
super(TFMultiWindowGenerator, self).__init__(**kwargs)
assert (config.filters >= np.prod(config.upsample_scales))
assert ((config.filters % (2 ** len(config.upsample_scales))) == 0)
self.config = config
layers = []
layers += [TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='first_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed))]
layers += [TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='second_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed)), TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='third_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed))]
for (i, upsample_scale) in enumerate(config.upsample_scales):
layers += [getattr(tf.keras.layers, config.nonlinear_activation)(**config.nonlinear_activation_params), TFConvTranspose1d(filters=(config.filters // (2 ** (i + 1))), kernel_size=(upsample_scale * 2), strides=upsample_scale, padding='same', is_weight_norm=config.is_weight_norm, initializer_seed=config.initializer_seed, name='conv_transpose_._{}'.format(i))]
for j in range(config.stacks):
layers += [TFResidualStack(kernel_size=config.stack_kernel_size, filters=max((config.filters // (2 ** (i + 1))), 128), dilation_rate=(config.stack_kernel_size ** j), use_bias=config.use_bias, nonlinear_activation=config.nonlinear_activation, nonlinear_activation_params=config.nonlinear_activation_params, is_weight_norm=config.is_weight_norm, initializer_seed=config.initializer_seed, name='residual_stack_._{}._._{}'.format(i, j))]
self.fc = tf.keras.layers.Dense(config.filters)
self.melgan_feature = tf.keras.Sequential(layers)
self.reshape1 = tf.keras.layers.Reshape([(- 1), ((self.config.window * config.filters) // 2)])
self.reshape2 = tf.keras.layers.Reshape([(- 1), (self.config.window * config.filters)])
self.reshape = tf.keras.layers.Reshape([(- 1), 1])
self.out_layer1 = tf.keras.layers.Conv1D(filters=self.config.out_channels, kernel_size=7, strides=1, padding='causal')
self.out_layer2 = tf.keras.layers.Dense(((self.config.window * self.config.out_channels) // 2))
self.out_layer3 = tf.keras.layers.Dense((self.config.window * self.config.out_channels))
self.out_c_score = tf.keras.layers.Conv1D(filters=3, kernel_size=41, padding='causal', activation='softmax') | def __init__(self, config, **kwargs):
'Initialize TFMelGANGenerator module.\n Args:\n config: config object of Melgan generator.\n '
super(TFMultiWindowGenerator, self).__init__(**kwargs)
assert (config.filters >= np.prod(config.upsample_scales))
assert ((config.filters % (2 ** len(config.upsample_scales))) == 0)
self.config = config
layers = []
layers += [TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='first_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed))]
layers += [TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='second_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed)), TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='third_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed))]
for (i, upsample_scale) in enumerate(config.upsample_scales):
layers += [getattr(tf.keras.layers, config.nonlinear_activation)(**config.nonlinear_activation_params), TFConvTranspose1d(filters=(config.filters // (2 ** (i + 1))), kernel_size=(upsample_scale * 2), strides=upsample_scale, padding='same', is_weight_norm=config.is_weight_norm, initializer_seed=config.initializer_seed, name='conv_transpose_._{}'.format(i))]
for j in range(config.stacks):
layers += [TFResidualStack(kernel_size=config.stack_kernel_size, filters=max((config.filters // (2 ** (i + 1))), 128), dilation_rate=(config.stack_kernel_size ** j), use_bias=config.use_bias, nonlinear_activation=config.nonlinear_activation, nonlinear_activation_params=config.nonlinear_activation_params, is_weight_norm=config.is_weight_norm, initializer_seed=config.initializer_seed, name='residual_stack_._{}._._{}'.format(i, j))]
self.fc = tf.keras.layers.Dense(config.filters)
self.melgan_feature = tf.keras.Sequential(layers)
self.reshape1 = tf.keras.layers.Reshape([(- 1), ((self.config.window * config.filters) // 2)])
self.reshape2 = tf.keras.layers.Reshape([(- 1), (self.config.window * config.filters)])
self.reshape = tf.keras.layers.Reshape([(- 1), 1])
self.out_layer1 = tf.keras.layers.Conv1D(filters=self.config.out_channels, kernel_size=7, strides=1, padding='causal')
self.out_layer2 = tf.keras.layers.Dense(((self.config.window * self.config.out_channels) // 2))
self.out_layer3 = tf.keras.layers.Dense((self.config.window * self.config.out_channels))
self.out_c_score = tf.keras.layers.Conv1D(filters=3, kernel_size=41, padding='causal', activation='softmax')<|docstring|>Initialize TFMelGANGenerator module.
Args:
config: config object of Melgan generator.<|endoftext|> |
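The constructor recorded above asserts that `filters >= np.prod(upsample_scales)` and that `filters` is divisible by `2 ** len(upsample_scales)`, because each upsampling stage halves the channel count. A quick arithmetic sketch of those constraints; the concrete values below are assumed for illustration, not taken from the row above:

```python
import numpy as np

# Assumed example values -- not from the dataset row above.
filters = 512
upsample_scales = [8, 8, 2, 2]

assert filters >= np.prod(upsample_scales)            # 512 >= 256
assert filters % (2 ** len(upsample_scales)) == 0     # 512 % 16 == 0

# Channel width left after every TFConvTranspose1d stage halves it:
print(int(np.prod(upsample_scales)), filters // 2 ** len(upsample_scales))  # 256 32
```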
803195b0435414255c2631d2072f376c220f2e9e89c3f7f2eca22987e6717e51 | def __init__(self, padding_size, padding_type='REFLECT', **kwargs):
'Initialize TFReflectionPad1d module.\n\n Args:\n padding_size (int)\n padding_type (str) ("CONSTANT", "REFLECT", or "SYMMETRIC". Default is "REFLECT")\n '
super().__init__(**kwargs)
self.padding_size = padding_size
self.padding_type = padding_type | Initialize TFReflectionPad1d module.
Args:
padding_size (int)
padding_type (str) ("CONSTANT", "REFLECT", or "SYMMETRIC". Default is "REFLECT") | models/vocoder.py | __init__ | Z-yq/TensorflowTTS | 50 | python | def __init__(self, padding_size, padding_type='REFLECT', **kwargs):
'Initialize TFReflectionPad1d module.\n\n Args:\n padding_size (int)\n padding_type (str) ("CONSTANT", "REFLECT", or "SYMMETRIC". Default is "REFLECT")\n '
super().__init__(**kwargs)
self.padding_size = padding_size
self.padding_type = padding_type | def __init__(self, padding_size, padding_type='REFLECT', **kwargs):
'Initialize TFReflectionPad1d module.\n\n Args:\n padding_size (int)\n padding_type (str) ("CONSTANT", "REFLECT", or "SYMMETRIC". Default is "REFLECT")\n '
super().__init__(**kwargs)
self.padding_size = padding_size
self.padding_type = padding_type<|docstring|>Initialize TFReflectionPad1d module.
Args:
padding_size (int)
padding_type (str) ("CONSTANT", "REFLECT", or "SYMMETRIC". Default is "REFLECT")<|endoftext|> |
3c74acd0416578c7b33ffb759c5d99a85fa986326ccca7c8314d1167cfd1f99f | def call(self, x):
'Calculate forward propagation.\n Args:\n x (Tensor): Input tensor (B, T, C).\n Returns:\n Tensor: Padded tensor (B, T + 2 * padding_size, C).\n '
return tf.pad(x, [[0, 0], [self.padding_size, self.padding_size], [0, 0]], self.padding_type) | Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, T, C).
Returns:
Tensor: Padded tensor (B, T + 2 * padding_size, C). | models/vocoder.py | call | Z-yq/TensorflowTTS | 50 | python | def call(self, x):
'Calculate forward propagation.\n Args:\n x (Tensor): Input tensor (B, T, C).\n Returns:\n Tensor: Padded tensor (B, T + 2 * padding_size, C).\n '
return tf.pad(x, [[0, 0], [self.padding_size, self.padding_size], [0, 0]], self.padding_type) | def call(self, x):
'Calculate forward propagation.\n Args:\n x (Tensor): Input tensor (B, T, C).\n Returns:\n Tensor: Padded tensor (B, T + 2 * padding_size, C).\n '
return tf.pad(x, [[0, 0], [self.padding_size, self.padding_size], [0, 0]], self.padding_type)<|docstring|>Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, T, C).
Returns:
Tensor: Padded tensor (B, T + 2 * padding_size, C).<|endoftext|> |
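The `call` above is a thin wrapper around `tf.pad`; a minimal shape check of the documented behaviour (batch, length, and channel sizes are assumed here):

```python
import tensorflow as tf

x = tf.random.uniform([2, 10, 4])         # (B, T, C) -- sizes assumed
padding_size = 3
y = tf.pad(x, [[0, 0], [padding_size, padding_size], [0, 0]], "REFLECT")
print(y.shape)                             # (2, 16, 4) == (B, T + 2 * padding_size, C)
```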
9f117eb18f11adb35165e00fdb7d8f235e0fc492c962f0fa5b2a39fefc4591b2 | def __init__(self, filters, kernel_size, strides, padding, is_weight_norm, initializer_seed, **kwargs):
'Initialize TFConvTranspose1d( module.\n Args:\n filters (int): Number of filters.\n kernel_size (int): kernel size.\n strides (int): Stride width.\n padding (str): Padding type ("same" or "valid").\n '
super().__init__(**kwargs)
self.conv1 = tf.keras.layers.Conv1D(filters=(filters * strides), kernel_size=kernel_size, padding='same', kernel_initializer=get_initializer(initializer_seed))
self.up = tf.keras.layers.Reshape([(- 1), filters])
self.conv2 = tf.keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, padding='same', kernel_initializer=get_initializer(initializer_seed))
if is_weight_norm:
self.conv1d_transpose = WeightNormalization(self.conv1d_transpose) | Initialize TFConvTranspose1d module.
Args:
filters (int): Number of filters.
kernel_size (int): kernel size.
strides (int): Stride width.
padding (str): Padding type ("same" or "valid"). | models/vocoder.py | __init__ | Z-yq/TensorflowTTS | 50 | python | def __init__(self, filters, kernel_size, strides, padding, is_weight_norm, initializer_seed, **kwargs):
'Initialize TFConvTranspose1d( module.\n Args:\n filters (int): Number of filters.\n kernel_size (int): kernel size.\n strides (int): Stride width.\n padding (str): Padding type ("same" or "valid").\n '
super().__init__(**kwargs)
self.conv1 = tf.keras.layers.Conv1D(filters=(filters * strides), kernel_size=kernel_size, padding='same', kernel_initializer=get_initializer(initializer_seed))
self.up = tf.keras.layers.Reshape([(- 1), filters])
self.conv2 = tf.keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, padding='same', kernel_initializer=get_initializer(initializer_seed))
if is_weight_norm:
self.conv1d_transpose = WeightNormalization(self.conv1d_transpose) | def __init__(self, filters, kernel_size, strides, padding, is_weight_norm, initializer_seed, **kwargs):
'Initialize TFConvTranspose1d( module.\n Args:\n filters (int): Number of filters.\n kernel_size (int): kernel size.\n strides (int): Stride width.\n padding (str): Padding type ("same" or "valid").\n '
super().__init__(**kwargs)
self.conv1 = tf.keras.layers.Conv1D(filters=(filters * strides), kernel_size=kernel_size, padding='same', kernel_initializer=get_initializer(initializer_seed))
self.up = tf.keras.layers.Reshape([(- 1), filters])
self.conv2 = tf.keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, padding='same', kernel_initializer=get_initializer(initializer_seed))
if is_weight_norm:
self.conv1d_transpose = WeightNormalization(self.conv1d_transpose)<|docstring|>Initialize TFConvTranspose1d module.
Args:
filters (int): Number of filters.
kernel_size (int): kernel size.
strides (int): Stride width.
padding (str): Padding type ("same" or "valid").<|endoftext|> |
691d7e7cc461e055cb8042bc8c75f254f20014cc70e76563c97ef1adda19cb8c | def call(self, x):
"Calculate forward propagation.\n Args:\n x (Tensor): Input tensor (B, T, C).\n Returns:\n Tensor: Output tensor (B, T', C').\n "
x = self.conv1(x)
x = self.up(x)
x = self.conv2(x)
return x | Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, T, C).
Returns:
Tensor: Output tensor (B, T', C'). | models/vocoder.py | call | Z-yq/TensorflowTTS | 50 | python | def call(self, x):
"Calculate forward propagation.\n Args:\n x (Tensor): Input tensor (B, T, C).\n Returns:\n Tensor: Output tensor (B, T', C').\n "
x = self.conv1(x)
x = self.up(x)
x = self.conv2(x)
return x | def call(self, x):
"Calculate forward propagation.\n Args:\n x (Tensor): Input tensor (B, T, C).\n Returns:\n Tensor: Output tensor (B, T', C').\n "
x = self.conv1(x)
x = self.up(x)
x = self.conv2(x)
return x<|docstring|>Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, T, C).
Returns:
Tensor: Output tensor (B, T', C').<|endoftext|> |
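This variant upsamples without a real transposed convolution: `conv1` widens the channels to `filters * strides`, the `Reshape([-1, filters])` folds that factor into the time axis, and `conv2` smooths the result. A standalone sketch of the shape arithmetic, with all sizes assumed rather than taken from the record:

```python
import tensorflow as tf

B, T, C = 2, 50, 256                       # assumed input shape
filters, strides, kernel_size = 128, 4, 8  # assumed layer parameters

conv1 = tf.keras.layers.Conv1D(filters * strides, kernel_size, padding="same")
up = tf.keras.layers.Reshape([-1, filters])
conv2 = tf.keras.layers.Conv1D(filters, kernel_size, padding="same")

x = tf.random.uniform([B, T, C])
y = conv2(up(conv1(x)))
print(y.shape)                             # (2, 200, 128): time stretched by `strides`
```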
9509d1d6ce8e2c226ece017ebcc44bfaa6fc576fa21d47387f6c045df5d69022 | def __init__(self, filters, kernel_size, strides, padding, is_weight_norm, initializer_seed, **kwargs):
'Initialize TFConvTranspose1d( module.\n Args:\n filters (int): Number of filters.\n kernel_size (int): kernel size.\n strides (int): Stride width.\n padding (str): Padding type ("same" or "valid").\n '
super().__init__(**kwargs)
self.conv1d_transpose = tf.keras.layers.Conv2DTranspose(filters=filters, kernel_size=(kernel_size, 1), strides=(strides, 1), padding='same', kernel_initializer=get_initializer(initializer_seed))
if is_weight_norm:
self.conv1d_transpose = WeightNormalization(self.conv1d_transpose) | Initialize TFConvTranspose1d module.
Args:
filters (int): Number of filters.
kernel_size (int): kernel size.
strides (int): Stride width.
padding (str): Padding type ("same" or "valid"). | models/vocoder.py | __init__ | Z-yq/TensorflowTTS | 50 | python | def __init__(self, filters, kernel_size, strides, padding, is_weight_norm, initializer_seed, **kwargs):
'Initialize TFConvTranspose1d( module.\n Args:\n filters (int): Number of filters.\n kernel_size (int): kernel size.\n strides (int): Stride width.\n padding (str): Padding type ("same" or "valid").\n '
super().__init__(**kwargs)
self.conv1d_transpose = tf.keras.layers.Conv2DTranspose(filters=filters, kernel_size=(kernel_size, 1), strides=(strides, 1), padding='same', kernel_initializer=get_initializer(initializer_seed))
if is_weight_norm:
self.conv1d_transpose = WeightNormalization(self.conv1d_transpose) | def __init__(self, filters, kernel_size, strides, padding, is_weight_norm, initializer_seed, **kwargs):
'Initialize TFConvTranspose1d( module.\n Args:\n filters (int): Number of filters.\n kernel_size (int): kernel size.\n strides (int): Stride width.\n padding (str): Padding type ("same" or "valid").\n '
super().__init__(**kwargs)
self.conv1d_transpose = tf.keras.layers.Conv2DTranspose(filters=filters, kernel_size=(kernel_size, 1), strides=(strides, 1), padding='same', kernel_initializer=get_initializer(initializer_seed))
if is_weight_norm:
self.conv1d_transpose = WeightNormalization(self.conv1d_transpose)<|docstring|>Initialize TFConvTranspose1d module.
Args:
filters (int): Number of filters.
kernel_size (int): kernel size.
strides (int): Stride width.
padding (str): Padding type ("same" or "valid").<|endoftext|> |
2f78426e3294b86e62c7c3c33e393c7e880c3b5d7e9c9e2b0b9e5effd813318a | def call(self, x):
"Calculate forward propagation.\n Args:\n x (Tensor): Input tensor (B, T, C).\n Returns:\n Tensor: Output tensor (B, T', C').\n "
x = tf.expand_dims(x, 2)
x = self.conv1d_transpose(x)
x = tf.squeeze(x, 2)
return x | Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, T, C).
Returns:
Tensor: Output tensor (B, T', C'). | models/vocoder.py | call | Z-yq/TensorflowTTS | 50 | python | def call(self, x):
"Calculate forward propagation.\n Args:\n x (Tensor): Input tensor (B, T, C).\n Returns:\n Tensor: Output tensor (B, T', C').\n "
x = tf.expand_dims(x, 2)
x = self.conv1d_transpose(x)
x = tf.squeeze(x, 2)
return x | def call(self, x):
"Calculate forward propagation.\n Args:\n x (Tensor): Input tensor (B, T, C).\n Returns:\n Tensor: Output tensor (B, T', C').\n "
x = tf.expand_dims(x, 2)
x = self.conv1d_transpose(x)
x = tf.squeeze(x, 2)
return x<|docstring|>Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, T, C).
Returns:
Tensor: Output tensor (B, T', C').<|endoftext|> |
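The second variant obtains a true 1-D transposed convolution from `Conv2DTranspose` by adding and then removing a dummy spatial axis; a sketch of that trick with assumed sizes:

```python
import tensorflow as tf

B, T, C = 2, 50, 256                           # assumed input shape
filters, kernel_size, strides = 128, 16, 8     # assumed layer parameters

conv1d_transpose = tf.keras.layers.Conv2DTranspose(
    filters, (kernel_size, 1), strides=(strides, 1), padding="same")

x = tf.random.uniform([B, T, C])
y = tf.squeeze(conv1d_transpose(tf.expand_dims(x, 2)), 2)
print(y.shape)                                 # (2, 400, 128): time upsampled by `strides`
```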
74fceb30e50580e804dae3aee79796b4460d1feb1db41bc9fb9ddb19d87e68b2 | def __init__(self, kernel_size, filters, dilation_rate, use_bias, nonlinear_activation, nonlinear_activation_params, is_weight_norm, initializer_seed, **kwargs):
'Initialize TFResidualStack module.\n Args:\n kernel_size (int): Kernel size.\n filters (int): Number of filters.\n dilation_rate (int): Dilation rate.\n use_bias (bool): Whether to add bias parameter in convolution layers.\n nonlinear_activation (str): Activation function module name.\n nonlinear_activation_params (dict): Hyperparameters for activation function.\n '
super().__init__(**kwargs)
self.blocks = [getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params), TFReflectionPad1d((((kernel_size - 1) // 2) * dilation_rate)), tf.keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, dilation_rate=dilation_rate, use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed)), getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params), tf.keras.layers.Conv1D(filters=filters, kernel_size=1, use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed))]
self.shortcut = tf.keras.layers.Conv1D(filters=filters, kernel_size=1, use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed), name='shortcut')
if is_weight_norm:
self._apply_weightnorm(self.blocks)
self.shortcut = WeightNormalization(self.shortcut) | Initialize TFResidualStack module.
Args:
kernel_size (int): Kernel size.
filters (int): Number of filters.
dilation_rate (int): Dilation rate.
use_bias (bool): Whether to add bias parameter in convolution layers.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function. | models/vocoder.py | __init__ | Z-yq/TensorflowTTS | 50 | python | def __init__(self, kernel_size, filters, dilation_rate, use_bias, nonlinear_activation, nonlinear_activation_params, is_weight_norm, initializer_seed, **kwargs):
'Initialize TFResidualStack module.\n Args:\n kernel_size (int): Kernel size.\n filters (int): Number of filters.\n dilation_rate (int): Dilation rate.\n use_bias (bool): Whether to add bias parameter in convolution layers.\n nonlinear_activation (str): Activation function module name.\n nonlinear_activation_params (dict): Hyperparameters for activation function.\n '
super().__init__(**kwargs)
self.blocks = [getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params), TFReflectionPad1d((((kernel_size - 1) // 2) * dilation_rate)), tf.keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, dilation_rate=dilation_rate, use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed)), getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params), tf.keras.layers.Conv1D(filters=filters, kernel_size=1, use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed))]
self.shortcut = tf.keras.layers.Conv1D(filters=filters, kernel_size=1, use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed), name='shortcut')
if is_weight_norm:
self._apply_weightnorm(self.blocks)
self.shortcut = WeightNormalization(self.shortcut) | def __init__(self, kernel_size, filters, dilation_rate, use_bias, nonlinear_activation, nonlinear_activation_params, is_weight_norm, initializer_seed, **kwargs):
'Initialize TFResidualStack module.\n Args:\n kernel_size (int): Kernel size.\n filters (int): Number of filters.\n dilation_rate (int): Dilation rate.\n use_bias (bool): Whether to add bias parameter in convolution layers.\n nonlinear_activation (str): Activation function module name.\n nonlinear_activation_params (dict): Hyperparameters for activation function.\n '
super().__init__(**kwargs)
self.blocks = [getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params), TFReflectionPad1d((((kernel_size - 1) // 2) * dilation_rate)), tf.keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, dilation_rate=dilation_rate, use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed)), getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params), tf.keras.layers.Conv1D(filters=filters, kernel_size=1, use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed))]
self.shortcut = tf.keras.layers.Conv1D(filters=filters, kernel_size=1, use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed), name='shortcut')
if is_weight_norm:
self._apply_weightnorm(self.blocks)
self.shortcut = WeightNormalization(self.shortcut)<|docstring|>Initialize TFResidualStack module.
Args:
kernel_size (int): Kernel size.
filters (int): Number of filters.
dilation_rate (int): Dilation rate.
use_bias (bool): Whether to add bias parameter in convolution layers.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function.<|endoftext|> |
2f0cdfbd573989b4098e69c95b18d98b87f0d0306b9b278286c32df281b4c6b9 | def call(self, x):
'Calculate forward propagation.\n Args:\n x (Tensor): Input tensor (B, T, C).\n Returns:\n Tensor: Output tensor (B, T, C).\n '
_x = tf.identity(x)
for layer in self.blocks:
_x = layer(_x)
shortcut = self.shortcut(x)
return (shortcut + _x) | Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, T, C).
Returns:
Tensor: Output tensor (B, T, C). | models/vocoder.py | call | Z-yq/TensorflowTTS | 50 | python | def call(self, x):
'Calculate forward propagation.\n Args:\n x (Tensor): Input tensor (B, T, C).\n Returns:\n Tensor: Output tensor (B, T, C).\n '
_x = tf.identity(x)
for layer in self.blocks:
_x = layer(_x)
shortcut = self.shortcut(x)
return (shortcut + _x) | def call(self, x):
'Calculate forward propagation.\n Args:\n x (Tensor): Input tensor (B, T, C).\n Returns:\n Tensor: Output tensor (B, T, C).\n '
_x = tf.identity(x)
for layer in self.blocks:
_x = layer(_x)
shortcut = self.shortcut(x)
return (shortcut + _x)<|docstring|>Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, T, C).
Returns:
Tensor: Output tensor (B, T, C).<|endoftext|> |
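The residual stack adds a dilated-convolution branch to a 1x1 shortcut of the same input. Below is a toy stand-in for `self.blocks` and `self.shortcut`; the layer sizes are assumed, and plain `padding="same"` replaces the reflection padding used above so the sketch stays self-contained:

```python
import tensorflow as tf

x = tf.random.uniform([2, 100, 64])                        # assumed (B, T, C)

blocks = tf.keras.Sequential([
    tf.keras.layers.LeakyReLU(alpha=0.2),
    tf.keras.layers.Conv1D(64, 3, dilation_rate=3, padding="same"),
    tf.keras.layers.LeakyReLU(alpha=0.2),
    tf.keras.layers.Conv1D(64, 1),
])
shortcut = tf.keras.layers.Conv1D(64, 1)

out = shortcut(x) + blocks(x)                              # same residual sum as call()
print(out.shape)                                           # (2, 100, 64)
```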
d0da2c60d96978f7c2dad3d4ff81ea33ae5d6a007e1f929dd3216fe7bb6ff62f | def __init__(self, config, **kwargs):
'Initialize TFMelGANGenerator module.\n Args:\n config: config object of Melgan generator.\n '
super().__init__(**kwargs)
assert (config.filters >= np.prod(config.upsample_scales))
assert ((config.filters % (2 ** len(config.upsample_scales))) == 0)
self.config = config
layers = []
layers += [TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='first_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed))]
layers += [TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='second_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed)), TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='third_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed))]
for (i, upsample_scale) in enumerate(config.upsample_scales):
layers += [getattr(tf.keras.layers, config.nonlinear_activation)(**config.nonlinear_activation_params), TFConvTranspose1d(filters=(config.filters // (2 ** (i + 1))), kernel_size=(upsample_scale * 2), strides=upsample_scale, padding='same', is_weight_norm=config.is_weight_norm, initializer_seed=config.initializer_seed, name='conv_transpose_._{}'.format(i))]
for j in range(config.stacks):
layers += [TFResidualStack(kernel_size=config.stack_kernel_size, filters=(config.filters // (2 ** (i + 1))), dilation_rate=(config.stack_kernel_size ** j), use_bias=config.use_bias, nonlinear_activation=config.nonlinear_activation, nonlinear_activation_params=config.nonlinear_activation_params, is_weight_norm=config.is_weight_norm, initializer_seed=config.initializer_seed, name='residual_stack_._{}._._{}'.format(i, j))]
layers += [getattr(tf.keras.layers, config.nonlinear_activation)(**config.nonlinear_activation_params), TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='last_reflect_padding'), tf.keras.layers.Conv1D(filters=config.out_channels, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed))]
if config.use_final_nolinear_activation:
layers += [tf.keras.layers.Activation('tanh')]
layers += [tf.keras.layers.Reshape([(- 1), 1])]
if (config.is_weight_norm is True):
self._apply_weightnorm(layers)
self.melgan = tf.keras.models.Sequential(layers) | Initialize TFMelGANGenerator module.
Args:
config: config object of Melgan generator. | models/vocoder.py | __init__ | Z-yq/TensorflowTTS | 50 | python | def __init__(self, config, **kwargs):
'Initialize TFMelGANGenerator module.\n Args:\n config: config object of Melgan generator.\n '
super().__init__(**kwargs)
assert (config.filters >= np.prod(config.upsample_scales))
assert ((config.filters % (2 ** len(config.upsample_scales))) == 0)
self.config = config
layers = []
layers += [TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='first_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed))]
layers += [TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='second_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed)), TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='third_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed))]
for (i, upsample_scale) in enumerate(config.upsample_scales):
layers += [getattr(tf.keras.layers, config.nonlinear_activation)(**config.nonlinear_activation_params), TFConvTranspose1d(filters=(config.filters // (2 ** (i + 1))), kernel_size=(upsample_scale * 2), strides=upsample_scale, padding='same', is_weight_norm=config.is_weight_norm, initializer_seed=config.initializer_seed, name='conv_transpose_._{}'.format(i))]
for j in range(config.stacks):
layers += [TFResidualStack(kernel_size=config.stack_kernel_size, filters=(config.filters // (2 ** (i + 1))), dilation_rate=(config.stack_kernel_size ** j), use_bias=config.use_bias, nonlinear_activation=config.nonlinear_activation, nonlinear_activation_params=config.nonlinear_activation_params, is_weight_norm=config.is_weight_norm, initializer_seed=config.initializer_seed, name='residual_stack_._{}._._{}'.format(i, j))]
layers += [getattr(tf.keras.layers, config.nonlinear_activation)(**config.nonlinear_activation_params), TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='last_reflect_padding'), tf.keras.layers.Conv1D(filters=config.out_channels, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed))]
if config.use_final_nolinear_activation:
layers += [tf.keras.layers.Activation('tanh')]
layers += [tf.keras.layers.Reshape([(- 1), 1])]
if (config.is_weight_norm is True):
self._apply_weightnorm(layers)
self.melgan = tf.keras.models.Sequential(layers) | def __init__(self, config, **kwargs):
'Initialize TFMelGANGenerator module.\n Args:\n config: config object of Melgan generator.\n '
super().__init__(**kwargs)
assert (config.filters >= np.prod(config.upsample_scales))
assert ((config.filters % (2 ** len(config.upsample_scales))) == 0)
self.config = config
layers = []
layers += [TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='first_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed))]
layers += [TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='second_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed)), TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='third_reflect_padding'), tf.keras.layers.Conv1D(filters=config.filters, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed))]
for (i, upsample_scale) in enumerate(config.upsample_scales):
layers += [getattr(tf.keras.layers, config.nonlinear_activation)(**config.nonlinear_activation_params), TFConvTranspose1d(filters=(config.filters // (2 ** (i + 1))), kernel_size=(upsample_scale * 2), strides=upsample_scale, padding='same', is_weight_norm=config.is_weight_norm, initializer_seed=config.initializer_seed, name='conv_transpose_._{}'.format(i))]
for j in range(config.stacks):
layers += [TFResidualStack(kernel_size=config.stack_kernel_size, filters=(config.filters // (2 ** (i + 1))), dilation_rate=(config.stack_kernel_size ** j), use_bias=config.use_bias, nonlinear_activation=config.nonlinear_activation, nonlinear_activation_params=config.nonlinear_activation_params, is_weight_norm=config.is_weight_norm, initializer_seed=config.initializer_seed, name='residual_stack_._{}._._{}'.format(i, j))]
layers += [getattr(tf.keras.layers, config.nonlinear_activation)(**config.nonlinear_activation_params), TFReflectionPad1d(((config.kernel_size - 1) // 2), padding_type=config.padding_type, name='last_reflect_padding'), tf.keras.layers.Conv1D(filters=config.out_channels, kernel_size=config.kernel_size, use_bias=config.use_bias, kernel_initializer=get_initializer(config.initializer_seed))]
if config.use_final_nolinear_activation:
layers += [tf.keras.layers.Activation('tanh')]
layers += [tf.keras.layers.Reshape([(- 1), 1])]
if (config.is_weight_norm is True):
self._apply_weightnorm(layers)
self.melgan = tf.keras.models.Sequential(layers)<|docstring|>Initialize TFMelGANGenerator module.
Args:
config: config object of Melgan generator.<|endoftext|> |
e3a7259182e338768cf6c5e1a2e8bb8732eb17a4f91884941493a70b7a100c35 | def call(self, c):
'Calculate forward propagation.\n Args:\n c (Tensor): Input tensor (B, T, channels)\n Returns:\n Tensor: Output tensor (B, T ** prod(upsample_scales), out_channels)\n '
x = self.melgan(c)
return x | Calculate forward propagation.
Args:
c (Tensor): Input tensor (B, T, channels)
Returns:
Tensor: Output tensor (B, T ** prod(upsample_scales), out_channels) | models/vocoder.py | call | Z-yq/TensorflowTTS | 50 | python | def call(self, c):
'Calculate forward propagation.\n Args:\n c (Tensor): Input tensor (B, T, channels)\n Returns:\n Tensor: Output tensor (B, T ** prod(upsample_scales), out_channels)\n '
x = self.melgan(c)
return x | def call(self, c):
'Calculate forward propagation.\n Args:\n c (Tensor): Input tensor (B, T, channels)\n Returns:\n Tensor: Output tensor (B, T ** prod(upsample_scales), out_channels)\n '
x = self.melgan(c)
return x<|docstring|>Calculate forward propagation.
Args:
c (Tensor): Input tensor (B, T, channels)
Returns:
Tensor: Output tensor (B, T ** prod(upsample_scales), out_channels)<|endoftext|> |
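The documented output size `T ** prod(upsample_scales)` corresponds, in the layers built above, to the input length multiplied by the product of the upsample scales, since each TFConvTranspose1d stretches time by its scale. A quick length check with assumed values:

```python
import numpy as np

upsample_scales = [8, 8, 2, 2]      # assumed config
in_frames = 200                     # assumed number of mel frames
out_samples = in_frames * int(np.prod(upsample_scales))
print(out_samples)                  # 51200 output samples, out_channels wide
```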
36a1508b4e78dc956475e9d9382c04c17231fcebb859226662049026d1f8fb57 | def _build(self):
'Build model by passing fake input.'
fake_mels = tf.random.uniform(shape=[1, 200, self.config.num_mels], dtype=tf.float32)
self(fake_mels) | Build model by passing fake input. | models/vocoder.py | _build | Z-yq/TensorflowTTS | 50 | python | def _build(self):
fake_mels = tf.random.uniform(shape=[1, 200, self.config.num_mels], dtype=tf.float32)
self(fake_mels) | def _build(self):
fake_mels = tf.random.uniform(shape=[1, 200, self.config.num_mels], dtype=tf.float32)
self(fake_mels)<|docstring|>Build model by passing fake input.<|endoftext|> |
93c53d9b7c0cb009fac236dbc5781c870177e574aff22bf986c9fa0153567939 | def __init__(self, config, **kwargs):
'Initilize MelGAN discriminator module.\n Args:\n out_channels (int): Number of output channels.\n kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer,\n and the first and the second kernel sizes will be used for the last two layers.\n For example if kernel_sizes = [5, 3], the first layer kernel size will be 5 * 3 = 15.\n the last two layers\' kernel size will be 5 and 3, respectively.\n filters (int): Initial number of filters for conv layer.\n max_downsample_filters (int): Maximum number of filters for downsampling layers.\n use_bias (bool): Whether to add bias parameter in convolution layers.\n downsample_scales (list): List of downsampling scales.\n nonlinear_activation (str): Activation function module name.\n nonlinear_activation_params (dict): Hyperparameters for activation function.\n padding_type (str): Padding type (support only "REFLECT", "CONSTANT", "SYMMETRIC")\n '
super().__init__(**kwargs)
out_channels = config.out_channels
kernel_sizes = config.kernel_sizes
filters = config.filters
max_downsample_filters = config.max_downsample_filters
use_bias = config.use_bias
downsample_scales = config.downsample_scales
nonlinear_activation = config.nonlinear_activation
nonlinear_activation_params = config.nonlinear_activation_params
padding_type = config.padding_type
is_weight_norm = config.is_weight_norm
initializer_seed = config.initializer_seed
assert (len(kernel_sizes) == 2)
assert ((kernel_sizes[0] % 2) == 1)
assert ((kernel_sizes[1] % 2) == 1)
discriminator = [TFReflectionPad1d(((np.prod(kernel_sizes) - 1) // 2), padding_type=padding_type), tf.keras.layers.Conv1D(filters=filters, kernel_size=int(np.prod(kernel_sizes)), use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed)), getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params)]
in_chs = filters
for downsample_scale in downsample_scales:
out_chs = min((in_chs * downsample_scale), max_downsample_filters)
discriminator += [tf.keras.layers.Conv1D(out_chs, ((downsample_scale * 10) + 1), downsample_scale, padding='same', kernel_initializer=get_initializer(initializer_seed))]
discriminator += [getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params)]
in_chs = out_chs
out_chs = min((in_chs * 2), max_downsample_filters)
discriminator += [tf.keras.layers.Conv1D(filters=out_chs, kernel_size=kernel_sizes[0], padding='same', use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed))]
discriminator += [getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params)]
discriminator += [tf.keras.layers.Conv1D(filters=out_channels, kernel_size=kernel_sizes[1], padding='same', use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed))]
if (is_weight_norm is True):
self._apply_weightnorm(discriminator)
self.disciminator = discriminator | Initialize MelGAN discriminator module.
Args:
out_channels (int): Number of output channels.
kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer,
and the first and the second kernel sizes will be used for the last two layers.
For example if kernel_sizes = [5, 3], the first layer kernel size will be 5 * 3 = 15.
the last two layers' kernel size will be 5 and 3, respectively.
filters (int): Initial number of filters for conv layer.
max_downsample_filters (int): Maximum number of filters for downsampling layers.
use_bias (bool): Whether to add bias parameter in convolution layers.
downsample_scales (list): List of downsampling scales.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function.
padding_type (str): Padding type (support only "REFLECT", "CONSTANT", "SYMMETRIC") | models/vocoder.py | __init__ | Z-yq/TensorflowTTS | 50 | python | def __init__(self, config, **kwargs):
'Initilize MelGAN discriminator module.\n Args:\n out_channels (int): Number of output channels.\n kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer,\n and the first and the second kernel sizes will be used for the last two layers.\n For example if kernel_sizes = [5, 3], the first layer kernel size will be 5 * 3 = 15.\n the last two layers\' kernel size will be 5 and 3, respectively.\n filters (int): Initial number of filters for conv layer.\n max_downsample_filters (int): Maximum number of filters for downsampling layers.\n use_bias (bool): Whether to add bias parameter in convolution layers.\n downsample_scales (list): List of downsampling scales.\n nonlinear_activation (str): Activation function module name.\n nonlinear_activation_params (dict): Hyperparameters for activation function.\n padding_type (str): Padding type (support only "REFLECT", "CONSTANT", "SYMMETRIC")\n '
super().__init__(**kwargs)
out_channels = config.out_channels
kernel_sizes = config.kernel_sizes
filters = config.filters
max_downsample_filters = config.max_downsample_filters
use_bias = config.use_bias
downsample_scales = config.downsample_scales
nonlinear_activation = config.nonlinear_activation
nonlinear_activation_params = config.nonlinear_activation_params
padding_type = config.padding_type
is_weight_norm = config.is_weight_norm
initializer_seed = config.initializer_seed
assert (len(kernel_sizes) == 2)
assert ((kernel_sizes[0] % 2) == 1)
assert ((kernel_sizes[1] % 2) == 1)
discriminator = [TFReflectionPad1d(((np.prod(kernel_sizes) - 1) // 2), padding_type=padding_type), tf.keras.layers.Conv1D(filters=filters, kernel_size=int(np.prod(kernel_sizes)), use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed)), getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params)]
in_chs = filters
for downsample_scale in downsample_scales:
out_chs = min((in_chs * downsample_scale), max_downsample_filters)
discriminator += [tf.keras.layers.Conv1D(out_chs, ((downsample_scale * 10) + 1), downsample_scale, padding='same', kernel_initializer=get_initializer(initializer_seed))]
discriminator += [getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params)]
in_chs = out_chs
out_chs = min((in_chs * 2), max_downsample_filters)
discriminator += [tf.keras.layers.Conv1D(filters=out_chs, kernel_size=kernel_sizes[0], padding='same', use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed))]
discriminator += [getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params)]
discriminator += [tf.keras.layers.Conv1D(filters=out_channels, kernel_size=kernel_sizes[1], padding='same', use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed))]
if (is_weight_norm is True):
self._apply_weightnorm(discriminator)
self.disciminator = discriminator | def __init__(self, config, **kwargs):
'Initilize MelGAN discriminator module.\n Args:\n out_channels (int): Number of output channels.\n kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer,\n and the first and the second kernel sizes will be used for the last two layers.\n For example if kernel_sizes = [5, 3], the first layer kernel size will be 5 * 3 = 15.\n the last two layers\' kernel size will be 5 and 3, respectively.\n filters (int): Initial number of filters for conv layer.\n max_downsample_filters (int): Maximum number of filters for downsampling layers.\n use_bias (bool): Whether to add bias parameter in convolution layers.\n downsample_scales (list): List of downsampling scales.\n nonlinear_activation (str): Activation function module name.\n nonlinear_activation_params (dict): Hyperparameters for activation function.\n padding_type (str): Padding type (support only "REFLECT", "CONSTANT", "SYMMETRIC")\n '
super().__init__(**kwargs)
out_channels = config.out_channels
kernel_sizes = config.kernel_sizes
filters = config.filters
max_downsample_filters = config.max_downsample_filters
use_bias = config.use_bias
downsample_scales = config.downsample_scales
nonlinear_activation = config.nonlinear_activation
nonlinear_activation_params = config.nonlinear_activation_params
padding_type = config.padding_type
is_weight_norm = config.is_weight_norm
initializer_seed = config.initializer_seed
assert (len(kernel_sizes) == 2)
assert ((kernel_sizes[0] % 2) == 1)
assert ((kernel_sizes[1] % 2) == 1)
discriminator = [TFReflectionPad1d(((np.prod(kernel_sizes) - 1) // 2), padding_type=padding_type), tf.keras.layers.Conv1D(filters=filters, kernel_size=int(np.prod(kernel_sizes)), use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed)), getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params)]
in_chs = filters
for downsample_scale in downsample_scales:
out_chs = min((in_chs * downsample_scale), max_downsample_filters)
discriminator += [tf.keras.layers.Conv1D(out_chs, ((downsample_scale * 10) + 1), downsample_scale, padding='same', kernel_initializer=get_initializer(initializer_seed))]
discriminator += [getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params)]
in_chs = out_chs
out_chs = min((in_chs * 2), max_downsample_filters)
discriminator += [tf.keras.layers.Conv1D(filters=out_chs, kernel_size=kernel_sizes[0], padding='same', use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed))]
discriminator += [getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params)]
discriminator += [tf.keras.layers.Conv1D(filters=out_channels, kernel_size=kernel_sizes[1], padding='same', use_bias=use_bias, kernel_initializer=get_initializer(initializer_seed))]
if (is_weight_norm is True):
self._apply_weightnorm(discriminator)
self.disciminator = discriminator<|docstring|>Initialize MelGAN discriminator module.
Args:
out_channels (int): Number of output channels.
kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer,
and the first and the second kernel sizes will be used for the last two layers.
For example if kernel_sizes = [5, 3], the first layer kernel size will be 5 * 3 = 15.
the last two layers' kernel size will be 5 and 3, respectively.
filters (int): Initial number of filters for conv layer.
max_downsample_filters (int): Maximum number of filters for downsampling layers.
use_bias (bool): Whether to add bias parameter in convolution layers.
downsample_scales (list): List of downsampling scales.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function.
padding_type (str): Padding type (support only "REFLECT", "CONSTANT", "SYMMETRIC")<|endoftext|> |
796bc2613de6b78bcd83230888b6f93c5a060f94ab32c1058b5d10ea3294c59d | def call(self, x):
'Calculate forward propagation.\n Args:\n x (Tensor): Input noise signal (B, T, 1).\n Returns:\n List: List of output tensors of each layer.\n '
outs = []
for f in self.disciminator:
x = f(x)
outs += [x]
return outs | Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, T, 1).
Returns:
List: List of output tensors of each layer. | models/vocoder.py | call | Z-yq/TensorflowTTS | 50 | python | def call(self, x):
'Calculate forward propagation.\n Args:\n x (Tensor): Input noise signal (B, T, 1).\n Returns:\n List: List of output tensors of each layer.\n '
outs = []
for f in self.disciminator:
x = f(x)
outs += [x]
return outs | def call(self, x):
'Calculate forward propagation.\n Args:\n x (Tensor): Input noise signal (B, T, 1).\n Returns:\n List: List of output tensors of each layer.\n '
outs = []
for f in self.disciminator:
x = f(x)
outs += [x]
return outs<|docstring|>Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, T, 1).
Returns:
List: List of output tensors of each layer.<|endoftext|> |
e5cac4a78b385e2092aa71b4757f9954a813ecfff7a3be9124ade776535cc1c2 | def _apply_weightnorm(self, list_layers):
'Try apply weightnorm for all layer in list_layers.'
for i in range(len(list_layers)):
try:
layer_name = list_layers[i].name.lower()
if (('conv1d' in layer_name) or ('dense' in layer_name)):
list_layers[i] = WeightNormalization(list_layers[i])
except Exception:
pass | Try to apply weight normalization to every layer in list_layers. | models/vocoder.py | _apply_weightnorm | Z-yq/TensorflowTTS | 50 | python | def _apply_weightnorm(self, list_layers):
for i in range(len(list_layers)):
try:
layer_name = list_layers[i].name.lower()
if (('conv1d' in layer_name) or ('dense' in layer_name)):
list_layers[i] = WeightNormalization(list_layers[i])
except Exception:
pass | def _apply_weightnorm(self, list_layers):
for i in range(len(list_layers)):
try:
layer_name = list_layers[i].name.lower()
if (('conv1d' in layer_name) or ('dense' in layer_name)):
list_layers[i] = WeightNormalization(list_layers[i])
except Exception:
pass<|docstring|>Try to apply weight normalization to every layer in list_layers.<|endoftext|>
373b8e6833fec62ab4abd4a6c617bd7e2c55b36cbcf67a96ddec278ae32d1a05 | def __init__(self, config, **kwargs):
'Initilize MelGAN multi-scale discriminator module.\n Args:\n config: config object for melgan discriminator\n '
super().__init__(**kwargs)
self.discriminator = []
for i in range(config.scales):
self.discriminator += [TFMelGANDiscriminator(config, name='melgan_discriminator_scale_._{}'.format(i))]
self.pooling = getattr(tf.keras.layers, config.downsample_pooling)(**config.downsample_pooling_params) | Initialize MelGAN multi-scale discriminator module.
Args:
config: config object for melgan discriminator | models/vocoder.py | __init__ | Z-yq/TensorflowTTS | 50 | python | def __init__(self, config, **kwargs):
'Initilize MelGAN multi-scale discriminator module.\n Args:\n config: config object for melgan discriminator\n '
super().__init__(**kwargs)
self.discriminator = []
for i in range(config.scales):
self.discriminator += [TFMelGANDiscriminator(config, name='melgan_discriminator_scale_._{}'.format(i))]
self.pooling = getattr(tf.keras.layers, config.downsample_pooling)(**config.downsample_pooling_params) | def __init__(self, config, **kwargs):
'Initilize MelGAN multi-scale discriminator module.\n Args:\n config: config object for melgan discriminator\n '
super().__init__(**kwargs)
self.discriminator = []
for i in range(config.scales):
self.discriminator += [TFMelGANDiscriminator(config, name='melgan_discriminator_scale_._{}'.format(i))]
self.pooling = getattr(tf.keras.layers, config.downsample_pooling)(**config.downsample_pooling_params)<|docstring|>Initialize MelGAN multi-scale discriminator module.
Args:
config: config object for melgan discriminator<|endoftext|> |
65ef2866cb628a43624ab4d7c3f626c9a47bc52ed4710d01106a445b24841dd0 | def _build(self):
'Build model by passing fake input.'
fake_mels = tf.random.uniform(shape=[1, 1600, 1], dtype=tf.float32)
self(fake_mels) | Build model by passing fake input. | models/vocoder.py | _build | Z-yq/TensorflowTTS | 50 | python | def _build(self):
fake_mels = tf.random.uniform(shape=[1, 1600, 1], dtype=tf.float32)
self(fake_mels) | def _build(self):
fake_mels = tf.random.uniform(shape=[1, 1600, 1], dtype=tf.float32)
self(fake_mels)<|docstring|>Build model by passing fake input.<|endoftext|> |
74fb52f2007d2435f6d941d67d62a6c43ed2ab9990cfcb55358ff31034966f81 | def call(self, x, **kwargs):
'Calculate forward propagation.\n Args:\n x (Tensor): Input noise signal (B, T, 1).\n Returns:\n List: List of list of each discriminator outputs, which consists of each layer output tensors.\n '
outs = []
for f in self.discriminator:
outs += [f(x)]
x = self.pooling(x)
return outs | Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, T, 1).
Returns:
List: List of list of each discriminator outputs, which consists of each layer output tensors. | models/vocoder.py | call | Z-yq/TensorflowTTS | 50 | python | def call(self, x, **kwargs):
'Calculate forward propagation.\n Args:\n x (Tensor): Input noise signal (B, T, 1).\n Returns:\n List: List of list of each discriminator outputs, which consists of each layer output tensors.\n '
outs = []
for f in self.discriminator:
outs += [f(x)]
x = self.pooling(x)
return outs | def call(self, x, **kwargs):
'Calculate forward propagation.\n Args:\n x (Tensor): Input noise signal (B, T, 1).\n Returns:\n List: List of list of each discriminator outputs, which consists of each layer output tensors.\n '
outs = []
for f in self.discriminator:
outs += [f(x)]
x = self.pooling(x)
return outs<|docstring|>Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, T, 1).
Returns:
List: List of list of each discriminator outputs, which consists of each layer output tensors.<|endoftext|> |
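Each scale of the multi-scale discriminator sees a progressively downsampled waveform, because the loop above pools `x` after every per-scale pass. A sketch of that loop; the pooling layer and its parameters are assumed here, while the record shows them coming from the config:

```python
import tensorflow as tf

audio = tf.random.uniform([1, 16000, 1])    # assumed waveform batch
# Pooling layer and parameters assumed; the real ones come from
# config.downsample_pooling / config.downsample_pooling_params.
pooling = tf.keras.layers.AveragePooling1D(pool_size=4, strides=2, padding="same")

x = audio
for scale in range(3):
    # a per-scale TFMelGANDiscriminator would be applied to x here
    print(scale, x.shape)
    x = pooling(x)
# 0 (1, 16000, 1) -> 1 (1, 8000, 1) -> 2 (1, 4000, 1)
```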
09d61b03c772775aef96c65e23e71702efa41355f4bbeb4f101c9ab6f2820646 | def handleGenericPacket(self, event):
'Decode the usbmuxd header.'
muxHeader = Struct.Group(None, Struct.UInt32BE('protocol'), Struct.UInt32BE('length'))
data = muxHeader.decode(event.data)
description = 'iPhone usbmuxd: '
if (muxHeader.length is None):
description += 'ERROR'
else:
self.remainingLength = (muxHeader.length - event.datalen)
description += ('proto=%s len=0x%04x' % (self.ipProto[muxHeader.protocol], muxHeader.length))
if self.remainingLength:
description += (' (0x%04x remaining)' % self.remainingLength)
event.pushDecoded(description)
if (self.ipProto[muxHeader.protocol] == 'TCP'):
self.handleTCP(event, data, (muxHeader.length - 8)) | Decode the usbmuxd header. | VUsbTools/Decoders/iPhone.py | handleGenericPacket | scanlime/vusb-analyzer | 34 | python | def handleGenericPacket(self, event):
muxHeader = Struct.Group(None, Struct.UInt32BE('protocol'), Struct.UInt32BE('length'))
data = muxHeader.decode(event.data)
description = 'iPhone usbmuxd: '
if (muxHeader.length is None):
description += 'ERROR'
else:
self.remainingLength = (muxHeader.length - event.datalen)
description += ('proto=%s len=0x%04x' % (self.ipProto[muxHeader.protocol], muxHeader.length))
if self.remainingLength:
description += (' (0x%04x remaining)' % self.remainingLength)
event.pushDecoded(description)
if (self.ipProto[muxHeader.protocol] == 'TCP'):
self.handleTCP(event, data, (muxHeader.length - 8)) | def handleGenericPacket(self, event):
muxHeader = Struct.Group(None, Struct.UInt32BE('protocol'), Struct.UInt32BE('length'))
data = muxHeader.decode(event.data)
description = 'iPhone usbmuxd: '
if (muxHeader.length is None):
description += 'ERROR'
else:
self.remainingLength = (muxHeader.length - event.datalen)
description += ('proto=%s len=0x%04x' % (self.ipProto[muxHeader.protocol], muxHeader.length))
if self.remainingLength:
description += (' (0x%04x remaining)' % self.remainingLength)
event.pushDecoded(description)
if (self.ipProto[muxHeader.protocol] == 'TCP'):
self.handleTCP(event, data, (muxHeader.length - 8))<|docstring|>Decode the usbmuxd header.<|endoftext|> |
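The usbmuxd header decoded above is just two big-endian 32-bit words, protocol and length; a small sketch with invented bytes (protocol 6 is the TCP case the decoder checks for):

```python
import struct

packet = struct.pack(">II", 6, 0x1c) + bytes(0x14)   # invented 28-byte packet
protocol, length = struct.unpack(">II", packet[:8])
remaining = length - len(packet)                      # mirrors length - event.datalen
print(protocol, hex(length), remaining)               # 6 0x1c 0
```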
c79b03757304c085c164d9ce29f055d33d868f6ac1da8105fa406811f5b77a98 | def handleTCP(self, event, data, datalen):
'Decode an IPPROTO_TCP packet header, and log the payload.'
datalen -= 20
tcpHeader = Struct.Group(None, Struct.UInt16BEHex('source'), Struct.UInt16BEHex('dest'), Struct.UInt32BE('seq'), Struct.UInt32BE('ack_seq'), Struct.UInt16BEHex('flags'), Struct.UInt16BE('window'), Struct.UInt16BEHex('checksum'), Struct.UInt16BEHex('urg_ptr'))
data = tcpHeader.decode(data)
event.pushDecoded(('iPhone TCP [%s -> %s] len=0x%04x' % (self.portNumbers[tcpHeader.source], self.portNumbers[tcpHeader.dest], datalen)))
event.appendDecoded(('\nTCP Header:\n%s' % str(tcpHeader)))
event.appendDecoded(('\nTCP Payload:\n%s' % Types.hexDump(data)))
for port in (tcpHeader.source, tcpHeader.dest):
fn = getattr(self, ('port_%s' % self.portNumbers[port]), None)
if fn:
fn(event, data, datalen) | Decode an IPPROTO_TCP packet header, and log the payload. | VUsbTools/Decoders/iPhone.py | handleTCP | scanlime/vusb-analyzer | 34 | python | def handleTCP(self, event, data, datalen):
datalen -= 20
tcpHeader = Struct.Group(None, Struct.UInt16BEHex('source'), Struct.UInt16BEHex('dest'), Struct.UInt32BE('seq'), Struct.UInt32BE('ack_seq'), Struct.UInt16BEHex('flags'), Struct.UInt16BE('window'), Struct.UInt16BEHex('checksum'), Struct.UInt16BEHex('urg_ptr'))
data = tcpHeader.decode(data)
event.pushDecoded(('iPhone TCP [%s -> %s] len=0x%04x' % (self.portNumbers[tcpHeader.source], self.portNumbers[tcpHeader.dest], datalen)))
event.appendDecoded(('\nTCP Header:\n%s' % str(tcpHeader)))
event.appendDecoded(('\nTCP Payload:\n%s' % Types.hexDump(data)))
for port in (tcpHeader.source, tcpHeader.dest):
fn = getattr(self, ('port_%s' % self.portNumbers[port]), None)
if fn:
fn(event, data, datalen) | def handleTCP(self, event, data, datalen):
datalen -= 20
tcpHeader = Struct.Group(None, Struct.UInt16BEHex('source'), Struct.UInt16BEHex('dest'), Struct.UInt32BE('seq'), Struct.UInt32BE('ack_seq'), Struct.UInt16BEHex('flags'), Struct.UInt16BE('window'), Struct.UInt16BEHex('checksum'), Struct.UInt16BEHex('urg_ptr'))
data = tcpHeader.decode(data)
event.pushDecoded(('iPhone TCP [%s -> %s] len=0x%04x' % (self.portNumbers[tcpHeader.source], self.portNumbers[tcpHeader.dest], datalen)))
event.appendDecoded(('\nTCP Header:\n%s' % str(tcpHeader)))
event.appendDecoded(('\nTCP Payload:\n%s' % Types.hexDump(data)))
for port in (tcpHeader.source, tcpHeader.dest):
fn = getattr(self, ('port_%s' % self.portNumbers[port]), None)
if fn:
fn(event, data, datalen)<|docstring|>Decode an IPPROTO_TCP packet header, and log the payload.<|endoftext|> |
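The `Struct.Group` fields above map onto a plain 20-byte big-endian TCP header; roughly the same decode with the standard `struct` module (the sample bytes are invented):

```python
import struct

tcp_header = bytes(20)   # placeholder header bytes
source, dest, seq, ack_seq, flags, window, checksum, urg_ptr = struct.unpack(
    ">HHIIHHHH", tcp_header[:20])
print(source, dest, seq, ack_seq, hex(flags), window)
```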
e13701137a6002151667b7141652ca6e61f96fc2f62ad89c357b4f4df7726e28 | def port_lockdownd(self, event, data, datalen):
'Handle lockdownd packets. These form a stream, which may or\n may not line up with the underlying USB packets. Each\n lockdownd packet is an XML plist, prefixed with a 32-bit\n length.\n '
summary = []
self.lockdownBuffer += data
if (datalen == 0):
return
elif (datalen != len(data)):
self.lockdownBuffer = ''
summary.append('ERROR, incomplete log!')
elif ((len(self.lockdownBuffer) >= 10) and (self.lockdownBuffer[0] == '\x00') and isascii(self.lockdownBuffer[1:])):
summary.append(('Message, %r' % self.lockdownBuffer[1:]))
elif ((len(self.lockdownBuffer) >= 10) and (self.lockdownBuffer[4:9] != '<?xml')):
self.lockdownBuffer = ''
summary.append('UNRECOGNIZED (SSL encrypted?)')
else:
while (len(self.lockdownBuffer) >= 4):
length = struct.unpack('>I', self.lockdownBuffer[:4])[0]
if (len(self.lockdownBuffer) < (length + 4)):
break
packet = self.lockdownBuffer[4:(length + 4)]
self.lockdownBuffer = self.lockdownBuffer[(length + 4):]
event.appendDecoded(('\nComplete lockdownd packet:\n%s' % Types.hexDump(packet)))
kvFull = []
kvAbbrev = []
for (k, v) in plistlib.readPlistFromString(packet).items():
kvFull.append((' %s = %s' % (k, v)))
if isinstance(v, plistlib.Data):
v = '(data)'
elif isinstance(v, dict):
v = '(dict)'
kvAbbrev.append(('%s=%s' % (k, v)))
event.appendDecoded(('\nDecoded plist:\n%s' % '\n'.join(kvFull)))
summary.append(('{%s}' % ' '.join(kvAbbrev)))
event.pushDecoded(('lockdownd: %s' % (' '.join(summary) or 'fragment'))) | Handle lockdownd packets. These form a stream, which may or
may not line up with the underlying USB packets. Each
lockdownd packet is an XML plist, prefixed with a 32-bit
length. | VUsbTools/Decoders/iPhone.py | port_lockdownd | scanlime/vusb-analyzer | 34 | python | def port_lockdownd(self, event, data, datalen):
'Handle lockdownd packets. These form a stream, which may or\n may not line up with the underlying USB packets. Each\n lockdownd packet is an XML plist, prefixed with a 32-bit\n length.\n '
summary = []
self.lockdownBuffer += data
if (datalen == 0):
return
elif (datalen != len(data)):
self.lockdownBuffer = ''
summary.append('ERROR, incomplete log!')
elif ((len(self.lockdownBuffer) >= 10) and (self.lockdownBuffer[0] == '\x00') and isascii(self.lockdownBuffer[1:])):
summary.append(('Message, %r' % self.lockdownBuffer[1:]))
elif ((len(self.lockdownBuffer) >= 10) and (self.lockdownBuffer[4:9] != '<?xml')):
self.lockdownBuffer = ''
summary.append('UNRECOGNIZED (SSL encrypted?)')
else:
while (len(self.lockdownBuffer) >= 4):
length = struct.unpack('>I', self.lockdownBuffer[:4])[0]
if (len(self.lockdownBuffer) < (length + 4)):
break
packet = self.lockdownBuffer[4:(length + 4)]
self.lockdownBuffer = self.lockdownBuffer[(length + 4):]
event.appendDecoded(('\nComplete lockdownd packet:\n%s' % Types.hexDump(packet)))
kvFull = []
kvAbbrev = []
for (k, v) in plistlib.readPlistFromString(packet).items():
kvFull.append((' %s = %s' % (k, v)))
if isinstance(v, plistlib.Data):
v = '(data)'
elif isinstance(v, dict):
v = '(dict)'
kvAbbrev.append(('%s=%s' % (k, v)))
event.appendDecoded(('\nDecoded plist:\n%s' % '\n'.join(kvFull)))
summary.append(('{%s}' % ' '.join(kvAbbrev)))
event.pushDecoded(('lockdownd: %s' % (' '.join(summary) or 'fragment'))) | def port_lockdownd(self, event, data, datalen):
'Handle lockdownd packets. These form a stream, which may or\n may not line up with the underlying USB packets. Each\n lockdownd packet is an XML plist, prefixed with a 32-bit\n length.\n '
summary = []
self.lockdownBuffer += data
if (datalen == 0):
return
elif (datalen != len(data)):
self.lockdownBuffer = ''
summary.append('ERROR, incomplete log!')
elif ((len(self.lockdownBuffer) >= 10) and (self.lockdownBuffer[0] == '\x00') and isascii(self.lockdownBuffer[1:])):
summary.append(('Message, %r' % self.lockdownBuffer[1:]))
elif ((len(self.lockdownBuffer) >= 10) and (self.lockdownBuffer[4:9] != '<?xml')):
self.lockdownBuffer = ''
summary.append('UNRECOGNIZED (SSL encrypted?)')
else:
while (len(self.lockdownBuffer) >= 4):
length = struct.unpack('>I', self.lockdownBuffer[:4])[0]
if (len(self.lockdownBuffer) < (length + 4)):
break
packet = self.lockdownBuffer[4:(length + 4)]
self.lockdownBuffer = self.lockdownBuffer[(length + 4):]
event.appendDecoded(('\nComplete lockdownd packet:\n%s' % Types.hexDump(packet)))
kvFull = []
kvAbbrev = []
for (k, v) in plistlib.readPlistFromString(packet).items():
kvFull.append((' %s = %s' % (k, v)))
if isinstance(v, plistlib.Data):
v = '(data)'
elif isinstance(v, dict):
v = '(dict)'
kvAbbrev.append(('%s=%s' % (k, v)))
event.appendDecoded(('\nDecoded plist:\n%s' % '\n'.join(kvFull)))
summary.append(('{%s}' % ' '.join(kvAbbrev)))
event.pushDecoded(('lockdownd: %s' % (' '.join(summary) or 'fragment')))<|docstring|>Handle lockdownd packets. These form a stream, which may or
may not line up with the underlying USB packets. Each
lockdownd packet is an XML plist, prefixed with a 32-bit
length.<|endoftext|> |
2fdc6082a405d51e536295ed0d43afb4aba8307410ec0387ea9cf42367548218 | def fetch_n_load_dataset(dataset_url=DATASET_URL, dataset_path=DATASET_PATH):
'\n\t\tFetches and load dataset.\n\n\t\t:param dataset_url: The dataset url\n\t\t:type dataset_url: { The URl of dataset as variable}\n\t\t:param dataset_path: The dataset path\n\t\t:type dataset_path: { PATH details for the dataset as the var}\n\n\t\t:returns: The file at a particular file location .\n\t\t:rtype: { return_type of this function is the file path }\n\t\t'
if (not os.path.isdir(dataset_path)):
os.makedirs(dataset_path)
tgz_path = os.path.join(dataset_path, tgz_path)
csv_path = os.path.join(dataset_path, 'FuelConsumptionCo2.csv')
return pd.read_csv(csv_path) | Fetches and load dataset.
:param dataset_url: The dataset url
:type dataset_url: { The URl of dataset as variable}
:param dataset_path: The dataset path
:type dataset_path: { PATH details for the dataset as the var}
:returns: The file at a particular file location .
:rtype: { return_type of this function is the file path } | LinearRegression.py | fetch_n_load_dataset | ausaafnabi/Machine-Learning-Projects | 1 | python | def fetch_n_load_dataset(dataset_url=DATASET_URL, dataset_path=DATASET_PATH):
'\n\t\tFetches and load dataset.\n\n\t\t:param dataset_url: The dataset url\n\t\t:type dataset_url: { The URl of dataset as variable}\n\t\t:param dataset_path: The dataset path\n\t\t:type dataset_path: { PATH details for the dataset as the var}\n\n\t\t:returns: The file at a particular file location .\n\t\t:rtype: { return_type of this function is the file path }\n\t\t'
if (not os.path.isdir(dataset_path)):
os.makedirs(dataset_path)
tgz_path = os.path.join(dataset_path, tgz_path)
csv_path = os.path.join(dataset_path, 'FuelConsumptionCo2.csv')
return pd.read_csv(csv_path) | def fetch_n_load_dataset(dataset_url=DATASET_URL, dataset_path=DATASET_PATH):
'\n\t\tFetches and load dataset.\n\n\t\t:param dataset_url: The dataset url\n\t\t:type dataset_url: { The URl of dataset as variable}\n\t\t:param dataset_path: The dataset path\n\t\t:type dataset_path: { PATH details for the dataset as the var}\n\n\t\t:returns: The file at a particular file location .\n\t\t:rtype: { return_type of this function is the file path }\n\t\t'
if (not os.path.isdir(dataset_path)):
os.makedirs(dataset_path)
tgz_path = os.path.join(dataset_path, tgz_path)
csv_path = os.path.join(dataset_path, 'FuelConsumptionCo2.csv')
return pd.read_csv(csv_path)<|docstring|>Fetches and load dataset.
:param dataset_url: The dataset url
:type dataset_url: { The URl of dataset as variable}
:param dataset_path: The dataset path
:type dataset_path: { PATH details for the dataset as the var}
:returns: The file at a particular file location .
:rtype: { return_type of this function is the file path }<|endoftext|> |
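As written, the row's function never uses dataset_url and reads tgz_path before assigning it, so it only succeeds when the CSV already exists on disk. A hedged sketch of the download-then-load flow the docstring implies; the filename default and the urlretrieve call are assumptions, not code from the repository:

import os
import urllib.request
import pandas as pd

def fetch_and_load_csv(dataset_url, dataset_path, filename='FuelConsumptionCo2.csv'):
    # Create the target directory, download the CSV once, then load it with pandas.
    os.makedirs(dataset_path, exist_ok=True)
    csv_path = os.path.join(dataset_path, filename)
    if not os.path.exists(csv_path):
        urllib.request.urlretrieve(dataset_url, csv_path)
    return pd.read_csv(csv_path)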
71c239b3a0fa008c3442212829368e4c7db9892b2632a4bd9578e5e7bc454029 | def scan_resource_conf(self, conf):
'\n Looks for ViewerProtocolPolicy configuration at cloudfront distributions:\n https://www.terraform.io/docs/providers/aws/r/cloudfront_distribution.html#viewer_protocol_policy\n :param conf: cloudfront configuration\n :return: <CheckResult>\n '
if ('default_cache_behavior' in conf.keys()):
self.evaluated_keys = ['default_cache_behavior/[0]/viewer_protocol_policy']
if isinstance(conf['default_cache_behavior'][0], dict):
default_viewer_policy = conf['default_cache_behavior'][0]['viewer_protocol_policy']
if (default_viewer_policy and (default_viewer_policy[0] == 'allow-all')):
return CheckResult.FAILED
if ('ordered_cache_behavior' in conf.keys()):
for behavior in conf['ordered_cache_behavior']:
if isinstance(behavior, dict):
if (behavior['viewer_protocol_policy'][0] == 'allow-all'):
self.evaluated_keys = [f"ordered_cache_behavior/[{conf['ordered_cache_behavior'].index(behavior)}]/viewer_protocol_policy"]
return CheckResult.FAILED
return CheckResult.PASSED | Looks for ViewerProtocolPolicy configuration at cloudfront distributions:
https://www.terraform.io/docs/providers/aws/r/cloudfront_distribution.html#viewer_protocol_policy
:param conf: cloudfront configuration
:return: <CheckResult> | checkov/terraform/checks/resource/aws/CloudfrontDistributionEncryption.py | scan_resource_conf | tophersmith/checkov | 4,013 | python | def scan_resource_conf(self, conf):
'\n Looks for ViewerProtocolPolicy configuration at cloudfront distributions:\n https://www.terraform.io/docs/providers/aws/r/cloudfront_distribution.html#viewer_protocol_policy\n :param conf: cloudfront configuration\n :return: <CheckResult>\n '
if ('default_cache_behavior' in conf.keys()):
self.evaluated_keys = ['default_cache_behavior/[0]/viewer_protocol_policy']
if isinstance(conf['default_cache_behavior'][0], dict):
default_viewer_policy = conf['default_cache_behavior'][0]['viewer_protocol_policy']
if (default_viewer_policy and (default_viewer_policy[0] == 'allow-all')):
return CheckResult.FAILED
if ('ordered_cache_behavior' in conf.keys()):
for behavior in conf['ordered_cache_behavior']:
if isinstance(behavior, dict):
if (behavior['viewer_protocol_policy'][0] == 'allow-all'):
self.evaluated_keys = [f"ordered_cache_behavior/[{conf['ordered_cache_behavior'].index(behavior)}]/viewer_protocol_policy"]
return CheckResult.FAILED
return CheckResult.PASSED | def scan_resource_conf(self, conf):
'\n Looks for ViewerProtocolPolicy configuration at cloudfront distributions:\n https://www.terraform.io/docs/providers/aws/r/cloudfront_distribution.html#viewer_protocol_policy\n :param conf: cloudfront configuration\n :return: <CheckResult>\n '
if ('default_cache_behavior' in conf.keys()):
self.evaluated_keys = ['default_cache_behavior/[0]/viewer_protocol_policy']
if isinstance(conf['default_cache_behavior'][0], dict):
default_viewer_policy = conf['default_cache_behavior'][0]['viewer_protocol_policy']
if (default_viewer_policy and (default_viewer_policy[0] == 'allow-all')):
return CheckResult.FAILED
if ('ordered_cache_behavior' in conf.keys()):
for behavior in conf['ordered_cache_behavior']:
if isinstance(behavior, dict):
if (behavior['viewer_protocol_policy'][0] == 'allow-all'):
self.evaluated_keys = [f"ordered_cache_behavior/[{conf['ordered_cache_behavior'].index(behavior)}]/viewer_protocol_policy"]
return CheckResult.FAILED
return CheckResult.PASSED<|docstring|>Looks for ViewerProtocolPolicy configuration at cloudfront distributions:
https://www.terraform.io/docs/providers/aws/r/cloudfront_distribution.html#viewer_protocol_policy
:param conf: cloudfront configuration
:return: <CheckResult><|endoftext|> |
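The check above can be exercised by handing it a parsed resource config in the nested-list shape checkov passes to scan_resource_conf. The dictionaries below are illustrative assumptions, not taken from the repository's tests:

failing_conf = {
    'default_cache_behavior': [{'viewer_protocol_policy': ['allow-all']}],
}
passing_conf = {
    'default_cache_behavior': [{'viewer_protocol_policy': ['redirect-to-https']}],
    'ordered_cache_behavior': [{'viewer_protocol_policy': ['https-only']}],
}
# With `check` an instance of the class above:
# check.scan_resource_conf(failing_conf)  -> CheckResult.FAILED
# check.scan_resource_conf(passing_conf)  -> CheckResult.PASSED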
d676db0ec5935629d8e8b3d14155458bf02ab48b72993550adb7d6fa661539ce | @alias(func_alias='ext', _type='COMMON')
def run():
'\n extension\n\n Lists installed extensions.\n '
loaded_ext = gget('webshell.loaded_ext', namespace='webshell')
print()
if isinstance(loaded_ext, list):
print(color.magenta('Extension ---> \n'))
for line in loaded_ext:
print(color.cyan((' * ' + line)))
else:
print(color.red('Unknown...'))
print() | extension
Lists installed extensions. | doughnuts/webshell_plugins/extension.py | run | MorouU/Doughnuts | 5 | python | @alias(func_alias='ext', _type='COMMON')
def run():
'\n extension\n\n Lists installed extensions.\n '
loaded_ext = gget('webshell.loaded_ext', namespace='webshell')
print()
if isinstance(loaded_ext, list):
print(color.magenta('Extension ---> \n'))
for line in loaded_ext:
print(color.cyan((' * ' + line)))
else:
print(color.red('Unknown...'))
print() | @alias(func_alias='ext', _type='COMMON')
def run():
'\n extension\n\n Lists installed extensions.\n '
loaded_ext = gget('webshell.loaded_ext', namespace='webshell')
print()
if isinstance(loaded_ext, list):
print(color.magenta('Extension ---> \n'))
for line in loaded_ext:
print(color.cyan((' * ' + line)))
else:
print(color.red('Unknown...'))
print()<|docstring|>extension
Lists installed extensions.<|endoftext|> |
611932430ecac333e3cbeef981d8780e2218e7b4647b2e6fdf2550eb5e18f7fc | def chunks(l, n):
'\n Yield successive n-sized chunks from l.\n '
for i in range(0, len(l), n):
(yield l[i:(i + n)]) | Yield successive n-sized chunks from l. | source/training/dataset.py | chunks | Hunter8moon/h8m2 | 0 | python | def chunks(l, n):
'\n \n '
for i in range(0, len(l), n):
(yield l[i:(i + n)]) | def chunks(l, n):
'\n \n '
for i in range(0, len(l), n):
(yield l[i:(i + n)])<|docstring|>Yield successive n-sized chunks from l.<|endoftext|> |
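A quick usage sketch of chunks as defined above; the final chunk simply keeps the remainder:

batches = list(chunks(list(range(7)), 3))
# -> [[0, 1, 2], [3, 4, 5], [6]]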
bf8162e9e4531f101e38eb6a9a5896f4eb85f210f85e61ddc69734a9a55c23d8 | def load_batch(self, files, batch_size, augment=True):
'\n Returns an random sample of images of length batch_size from the filenames.\n '
batch_size = min(batch_size, len(files))
files = random.sample(files, batch_size)
w = self.shape[0]
h = self.shape[1]
images = []
for file in files:
img = ImageUtil.file_to_array(file, w, h, augment=augment)
images.append(img)
images = np.array(images)
return images | Returns an random sample of images of length batch_size from the filenames. | source/training/dataset.py | load_batch | Hunter8moon/h8m2 | 0 | python | def load_batch(self, files, batch_size, augment=True):
'\n \n '
batch_size = min(batch_size, len(files))
files = random.sample(files, batch_size)
w = self.shape[0]
h = self.shape[1]
images = []
for file in files:
img = ImageUtil.file_to_array(file, w, h, augment=augment)
images.append(img)
images = np.array(images)
return images | def load_batch(self, files, batch_size, augment=True):
'\n \n '
batch_size = min(batch_size, len(files))
files = random.sample(files, batch_size)
w = self.shape[0]
h = self.shape[1]
images = []
for file in files:
img = ImageUtil.file_to_array(file, w, h, augment=augment)
images.append(img)
images = np.array(images)
return images<|docstring|>Returns an random sample of images of length batch_size from the filenames.<|endoftext|> |
dd1642c0c8acca829fe85c98273a19fc9844959a698306df3ef168198a4bcb89 | def partition_files(self, batch_size):
'\n Shuffles the files and partitions them into chunks of length batch_size.\n Returns a list of pairs (a_files, b_files).\n '
random.shuffle(self.files_trainA)
random.shuffle(self.files_trainB)
a_files = chunks(self.files_trainA, batch_size)
b_files = chunks(self.files_trainB, batch_size)
return list(zip(a_files, b_files)) | Shuffles the files and partitions them into chunks of length batch_size.
Returns a list of pairs (a_files, b_files). | source/training/dataset.py | partition_files | Hunter8moon/h8m2 | 0 | python | def partition_files(self, batch_size):
'\n Shuffles the files and partitions them into chunks of length batch_size.\n Returns a list of pairs (a_files, b_files).\n '
random.shuffle(self.files_trainA)
random.shuffle(self.files_trainB)
a_files = chunks(self.files_trainA, batch_size)
b_files = chunks(self.files_trainB, batch_size)
return list(zip(a_files, b_files)) | def partition_files(self, batch_size):
'\n Shuffles the files and partitions them into chunks of length batch_size.\n Returns a list of pairs (a_files, b_files).\n '
random.shuffle(self.files_trainA)
random.shuffle(self.files_trainB)
a_files = chunks(self.files_trainA, batch_size)
b_files = chunks(self.files_trainB, batch_size)
return list(zip(a_files, b_files))<|docstring|>Shuffles the files and partitions them into chunks of length batch_size.
Returns a list of pairs (a_files, b_files).<|endoftext|> |
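partition_files is chunks applied to both shuffled file lists and zipped together, so its output shape is easy to preview with stand-in file names (the lists below are hypothetical, not the trainer's real attributes):

a_files = ['a1.png', 'a2.png', 'a3.png', 'a4.png']
b_files = ['b1.png', 'b2.png', 'b3.png', 'b4.png']
pairs = list(zip(chunks(a_files, 2), chunks(b_files, 2)))
# -> [(['a1.png', 'a2.png'], ['b1.png', 'b2.png']),
#     (['a3.png', 'a4.png'], ['b3.png', 'b4.png'])]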
7ef05c291c9cf26464156a485d25990005dfdaf27b51cece4387f15b37d3ff74 | def get_ordering(self, request):
'\n Returns a sequence defining the default ordering for results in the\n list view.\n '
if (not self.ordering):
return (self.sort_order_field,)
elif (self.sort_order_field not in self.ordering):
return ((self.sort_order_field,) + tuple(self.ordering))
return self.ordering | Returns a sequence defining the default ordering for results in the
list view. | wagtailorderable/modeladmin/mixins.py | get_ordering | kausaltech/wagtail-orderable | 0 | python | def get_ordering(self, request):
'\n Returns a sequence defining the default ordering for results in the\n list view.\n '
if (not self.ordering):
return (self.sort_order_field,)
elif (self.sort_order_field not in self.ordering):
return ((self.sort_order_field,) + tuple(self.ordering))
return self.ordering | def get_ordering(self, request):
'\n Returns a sequence defining the default ordering for results in the\n list view.\n '
if (not self.ordering):
return (self.sort_order_field,)
elif (self.sort_order_field not in self.ordering):
return ((self.sort_order_field,) + tuple(self.ordering))
return self.ordering<|docstring|>Returns a sequence defining the default ordering for results in the
list view.<|endoftext|> |
f60aa2f14d5870303f02b9c3cebef139946093a0e6b8d1eac0ea67f212031a1e | def get_list_display(self, request):
'Add `index_order` as the first column to results'
list_display = list(super().get_list_display(request))
if (self.sort_order_field in list_display):
list_display.remove(self.sort_order_field)
return ('index_order', *list_display) | Add `index_order` as the first column to results | wagtailorderable/modeladmin/mixins.py | get_list_display | kausaltech/wagtail-orderable | 0 | python | def get_list_display(self, request):
list_display = list(super().get_list_display(request))
if (self.sort_order_field in list_display):
list_display.remove(self.sort_order_field)
return ('index_order', *list_display) | def get_list_display(self, request):
list_display = list(super().get_list_display(request))
if (self.sort_order_field in list_display):
list_display.remove(self.sort_order_field)
return ('index_order', *list_display)<|docstring|>Add `index_order` as the first column to results<|endoftext|> |
cd3279f0fbe6e8bb0cb93d283f8beb6901df5bb178a90097e2cdcdc05d5e4ff5 | def get_list_display_add_buttons(self, request):
"\n If `list_display_add_buttons` isn't set, ensure the buttons are not\n added to the `index_order` column.\n "
col_field_name = super(OrderableMixin, self).get_list_display_add_buttons(request)
if (col_field_name == 'index_order'):
list_display = self.get_list_display(request)
return list_display[1]
return col_field_name | If `list_display_add_buttons` isn't set, ensure the buttons are not
added to the `index_order` column. | wagtailorderable/modeladmin/mixins.py | get_list_display_add_buttons | kausaltech/wagtail-orderable | 0 | python | def get_list_display_add_buttons(self, request):
"\n If `list_display_add_buttons` isn't set, ensure the buttons are not\n added to the `index_order` column.\n "
col_field_name = super(OrderableMixin, self).get_list_display_add_buttons(request)
if (col_field_name == 'index_order'):
list_display = self.get_list_display(request)
return list_display[1]
return col_field_name | def get_list_display_add_buttons(self, request):
"\n If `list_display_add_buttons` isn't set, ensure the buttons are not\n added to the `index_order` column.\n "
col_field_name = super(OrderableMixin, self).get_list_display_add_buttons(request)
if (col_field_name == 'index_order'):
list_display = self.get_list_display(request)
return list_display[1]
return col_field_name<|docstring|>If `list_display_add_buttons` isn't set, ensure the buttons are not
added to the `index_order` column.<|endoftext|> |
be630a51176d170e7127fa8000c716e8002f0e79865fea3a117fe4d91b0f6a80 | def get_extra_attrs_for_field_col(self, obj, field_name):
'\n Add data attributes to the `index_order` column that can be picked\n up via JS. The width attribute helps the column remain at a fixed size\n while dragging and the title is used for generating a success message\n on completion reorder completion.\n '
attrs = super(OrderableMixin, self).get_extra_attrs_for_field_col(obj, field_name)
if (field_name == 'index_order'):
attrs.update({'data-title': obj.__str__(), 'width': 20})
return attrs | Add data attributes to the `index_order` column that can be picked
up via JS. The width attribute helps the column remain at a fixed size
while dragging and the title is used for generating a success message
on completion reorder completion. | wagtailorderable/modeladmin/mixins.py | get_extra_attrs_for_field_col | kausaltech/wagtail-orderable | 0 | python | def get_extra_attrs_for_field_col(self, obj, field_name):
'\n Add data attributes to the `index_order` column that can be picked\n up via JS. The width attribute helps the column remain at a fixed size\n while dragging and the title is used for generating a success message\n on completion reorder completion.\n '
attrs = super(OrderableMixin, self).get_extra_attrs_for_field_col(obj, field_name)
if (field_name == 'index_order'):
attrs.update({'data-title': obj.__str__(), 'width': 20})
return attrs | def get_extra_attrs_for_field_col(self, obj, field_name):
'\n Add data attributes to the `index_order` column that can be picked\n up via JS. The width attribute helps the column remain at a fixed size\n while dragging and the title is used for generating a success message\n on completion reorder completion.\n '
attrs = super(OrderableMixin, self).get_extra_attrs_for_field_col(obj, field_name)
if (field_name == 'index_order'):
attrs.update({'data-title': obj.__str__(), 'width': 20})
return attrs<|docstring|>Add data attributes to the `index_order` column that can be picked
up via JS. The width attribute helps the column remain at a fixed size
while dragging and the title is used for generating a success message
on completion reorder completion.<|endoftext|> |
3d53ef704ef4cd928d84476226e234b9527ee30517493130538af82ee17bcb67 | def get_extra_class_names_for_field_col(self, obj, field_name):
'\n Add the `visible-on-drag` class to certain columns\n '
classnames = super(OrderableMixin, self).get_extra_class_names_for_field_col(obj, field_name)
if (field_name in ('index_order', self.list_display[0], 'admin_thumb', (self.list_display_add_buttons or ''))):
classnames.append('visible-on-drag')
return classnames | Add the `visible-on-drag` class to certain columns | wagtailorderable/modeladmin/mixins.py | get_extra_class_names_for_field_col | kausaltech/wagtail-orderable | 0 | python | def get_extra_class_names_for_field_col(self, obj, field_name):
'\n \n '
classnames = super(OrderableMixin, self).get_extra_class_names_for_field_col(obj, field_name)
if (field_name in ('index_order', self.list_display[0], 'admin_thumb', (self.list_display_add_buttons or ''))):
classnames.append('visible-on-drag')
return classnames | def get_extra_class_names_for_field_col(self, obj, field_name):
'\n \n '
classnames = super(OrderableMixin, self).get_extra_class_names_for_field_col(obj, field_name)
if (field_name in ('index_order', self.list_display[0], 'admin_thumb', (self.list_display_add_buttons or ''))):
classnames.append('visible-on-drag')
return classnames<|docstring|>Add the `visible-on-drag` class to certain columns<|endoftext|> |
87045f84fcd43c0393c65fd76954d0b1639d58042f9b012300790b747c804571 | @transaction.atomic
def reorder_view(self, request, instance_pk):
'\n Very simple view functionality for updating the `sort_order` values\n for objects after a row has been dragged to a new position.\n '
self.fix_duplicate_positions(request)
obj_to_move = get_object_or_404(self.model, pk=instance_pk)
if (not self.permission_helper.user_can_edit_obj(request.user, obj_to_move)):
raise PermissionDenied
old_position = (getattr(obj_to_move, self.sort_order_field) or 0)
(after_position, after) = self._get_position(request.GET.get('after'))
(before_position, before) = self._get_position(request.GET.get('before'))
if after:
position = (after_position or 0)
response = (_('"%s" moved after "%s"') % (obj_to_move, after))
elif before:
position = (before_position or 0)
response = (_('"%s" moved before "%s"') % (obj_to_move, before))
else:
return HttpResponseBadRequest((_('"%s" not moved') % obj_to_move))
qs = self.get_filtered_queryset(request)
signal_kwargs = {'sender': self.__class__, 'queryset': qs}
if (position < old_position):
if (position == after_position):
position += 1
qs = qs.filter(**{('%s__lt' % self.sort_order_field): old_position, ('%s__gte' % self.sort_order_field): position})
update_value = (F(self.sort_order_field) + 1)
signal_kwargs.update({'from_order': position, 'to_position': (old_position + 1)})
elif (position > old_position):
if (position == before_position):
position -= 1
qs = qs.filter(**{('%s__gt' % self.sort_order_field): old_position, ('%s__lte' % self.sort_order_field): position})
update_value = (F(self.sort_order_field) - 1)
signal_kwargs.update({'from_order': (old_position - 1), 'to_position': position})
pre_reorder.send(**signal_kwargs)
qs.update(**{self.sort_order_field: update_value})
self.model.objects.filter(pk=obj_to_move.pk).update(**{self.sort_order_field: position})
post_reorder.send(**signal_kwargs)
return HttpResponse(response) | Very simple view functionality for updating the `sort_order` values
for objects after a row has been dragged to a new position. | wagtailorderable/modeladmin/mixins.py | reorder_view | kausaltech/wagtail-orderable | 0 | python | @transaction.atomic
def reorder_view(self, request, instance_pk):
'\n Very simple view functionality for updating the `sort_order` values\n for objects after a row has been dragged to a new position.\n '
self.fix_duplicate_positions(request)
obj_to_move = get_object_or_404(self.model, pk=instance_pk)
if (not self.permission_helper.user_can_edit_obj(request.user, obj_to_move)):
raise PermissionDenied
old_position = (getattr(obj_to_move, self.sort_order_field) or 0)
(after_position, after) = self._get_position(request.GET.get('after'))
(before_position, before) = self._get_position(request.GET.get('before'))
if after:
position = (after_position or 0)
response = (_('"%s" moved after "%s"') % (obj_to_move, after))
elif before:
position = (before_position or 0)
response = (_('"%s" moved before "%s"') % (obj_to_move, before))
else:
return HttpResponseBadRequest((_('"%s" not moved') % obj_to_move))
qs = self.get_filtered_queryset(request)
signal_kwargs = {'sender': self.__class__, 'queryset': qs}
if (position < old_position):
if (position == after_position):
position += 1
qs = qs.filter(**{('%s__lt' % self.sort_order_field): old_position, ('%s__gte' % self.sort_order_field): position})
update_value = (F(self.sort_order_field) + 1)
signal_kwargs.update({'from_order': position, 'to_position': (old_position + 1)})
elif (position > old_position):
if (position == before_position):
position -= 1
qs = qs.filter(**{('%s__gt' % self.sort_order_field): old_position, ('%s__lte' % self.sort_order_field): position})
update_value = (F(self.sort_order_field) - 1)
signal_kwargs.update({'from_order': (old_position - 1), 'to_position': position})
pre_reorder.send(**signal_kwargs)
qs.update(**{self.sort_order_field: update_value})
self.model.objects.filter(pk=obj_to_move.pk).update(**{self.sort_order_field: position})
post_reorder.send(**signal_kwargs)
return HttpResponse(response) | @transaction.atomic
def reorder_view(self, request, instance_pk):
'\n Very simple view functionality for updating the `sort_order` values\n for objects after a row has been dragged to a new position.\n '
self.fix_duplicate_positions(request)
obj_to_move = get_object_or_404(self.model, pk=instance_pk)
if (not self.permission_helper.user_can_edit_obj(request.user, obj_to_move)):
raise PermissionDenied
old_position = (getattr(obj_to_move, self.sort_order_field) or 0)
(after_position, after) = self._get_position(request.GET.get('after'))
(before_position, before) = self._get_position(request.GET.get('before'))
if after:
position = (after_position or 0)
response = (_('"%s" moved after "%s"') % (obj_to_move, after))
elif before:
position = (before_position or 0)
response = (_('"%s" moved before "%s"') % (obj_to_move, before))
else:
return HttpResponseBadRequest((_('"%s" not moved') % obj_to_move))
qs = self.get_filtered_queryset(request)
signal_kwargs = {'sender': self.__class__, 'queryset': qs}
if (position < old_position):
if (position == after_position):
position += 1
qs = qs.filter(**{('%s__lt' % self.sort_order_field): old_position, ('%s__gte' % self.sort_order_field): position})
update_value = (F(self.sort_order_field) + 1)
signal_kwargs.update({'from_order': position, 'to_position': (old_position + 1)})
elif (position > old_position):
if (position == before_position):
position -= 1
qs = qs.filter(**{('%s__gt' % self.sort_order_field): old_position, ('%s__lte' % self.sort_order_field): position})
update_value = (F(self.sort_order_field) - 1)
signal_kwargs.update({'from_order': (old_position - 1), 'to_position': position})
pre_reorder.send(**signal_kwargs)
qs.update(**{self.sort_order_field: update_value})
self.model.objects.filter(pk=obj_to_move.pk).update(**{self.sort_order_field: position})
post_reorder.send(**signal_kwargs)
return HttpResponse(response)<|docstring|>Very simple view functionality for updating the `sort_order` values
for objects after a row has been dragged to a new position.<|endoftext|> |
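The queryset arithmetic above amounts to moving one element inside an ordered sequence while the displaced block slides by one position. A list-based sketch of the same idea, independent of the ORM:

def move(seq, old_index, new_index):
    # Mirrors the F() + 1 / F() - 1 updates: only the block between the two positions shifts.
    item = seq.pop(old_index)
    seq.insert(new_index, item)
    return seq

# move(['a', 'b', 'c', 'd'], 0, 2) -> ['b', 'c', 'a', 'd']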
32124167380a0274196e726465aca24171406c576fb601137f24bdcd20aba070 | @transaction.atomic
def fix_duplicate_positions(self, request):
'\n Low level function which updates each element to have sequential sort_order values\n if the database contains any duplicate values (gaps are ok).\n '
qs = self.get_filtered_queryset(request)
first_duplicate = qs.values('order').annotate(index_order_count=Count(self.sort_order_field)).filter(index_order_count__gt=1).order_by('order').first()
if (not first_duplicate):
return
lookups = {('%s__gte' % self.sort_order_field): first_duplicate[self.sort_order_field]}
to_reorder = qs.filter(**lookups).order_by(self.sort_order_field).values_list('pk', self.sort_order_field)[1:]
field = self.model._meta.get_field(self.sort_order_field)
when_statements = []
pks = []
bulk_update_qs = self.get_filtered_queryset(request)
new_order = first_duplicate['index_order_count']
for (pk, current_order) in to_reorder:
new_order += 1
if (current_order > new_order):
new_order = (current_order + 1)
continue
if (current_order == new_order):
continue
pks.append(pk)
when_statements.append(When(pk=pk, then=Value(new_order, output_field=field)))
case_statement = Case(*when_statements, output_field=field)
if connections[bulk_update_qs.db].features.requires_casted_case_in_updates:
case_statement = Cast(case_statement, output_field=field)
pre_reorder.send(sender=self.__class__, from_order=(first_duplicate['index_order_count'] + 1), to_order=new_order, queryset=bulk_update_qs)
bulk_update_qs.filter(pk__in=pks).update(**{self.sort_order_field: case_statement})
post_reorder.send(sender=self.__class__, from_order=(first_duplicate['index_order_count'] + 1), to_order=new_order, queryset=bulk_update_qs) | Low level function which updates each element to have sequential sort_order values
if the database contains any duplicate values (gaps are ok). | wagtailorderable/modeladmin/mixins.py | fix_duplicate_positions | kausaltech/wagtail-orderable | 0 | python | @transaction.atomic
def fix_duplicate_positions(self, request):
'\n Low level function which updates each element to have sequential sort_order values\n if the database contains any duplicate values (gaps are ok).\n '
qs = self.get_filtered_queryset(request)
first_duplicate = qs.values('order').annotate(index_order_count=Count(self.sort_order_field)).filter(index_order_count__gt=1).order_by('order').first()
if (not first_duplicate):
return
lookups = {('%s__gte' % self.sort_order_field): first_duplicate[self.sort_order_field]}
to_reorder = qs.filter(**lookups).order_by(self.sort_order_field).values_list('pk', self.sort_order_field)[1:]
field = self.model._meta.get_field(self.sort_order_field)
when_statements = []
pks = []
bulk_update_qs = self.get_filtered_queryset(request)
new_order = first_duplicate['index_order_count']
for (pk, current_order) in to_reorder:
new_order += 1
if (current_order > new_order):
new_order = (current_order + 1)
continue
if (current_order == new_order):
continue
pks.append(pk)
when_statements.append(When(pk=pk, then=Value(new_order, output_field=field)))
case_statement = Case(*when_statements, output_field=field)
if connections[bulk_update_qs.db].features.requires_casted_case_in_updates:
case_statement = Cast(case_statement, output_field=field)
pre_reorder.send(sender=self.__class__, from_order=(first_duplicate['index_order_count'] + 1), to_order=new_order, queryset=bulk_update_qs)
bulk_update_qs.filter(pk__in=pks).update(**{self.sort_order_field: case_statement})
post_reorder.send(sender=self.__class__, from_order=(first_duplicate['index_order_count'] + 1), to_order=new_order, queryset=bulk_update_qs) | @transaction.atomic
def fix_duplicate_positions(self, request):
'\n Low level function which updates each element to have sequential sort_order values\n if the database contains any duplicate values (gaps are ok).\n '
qs = self.get_filtered_queryset(request)
first_duplicate = qs.values('order').annotate(index_order_count=Count(self.sort_order_field)).filter(index_order_count__gt=1).order_by('order').first()
if (not first_duplicate):
return
lookups = {('%s__gte' % self.sort_order_field): first_duplicate[self.sort_order_field]}
to_reorder = qs.filter(**lookups).order_by(self.sort_order_field).values_list('pk', self.sort_order_field)[1:]
field = self.model._meta.get_field(self.sort_order_field)
when_statements = []
pks = []
bulk_update_qs = self.get_filtered_queryset(request)
new_order = first_duplicate['index_order_count']
for (pk, current_order) in to_reorder:
new_order += 1
if (current_order > new_order):
new_order = (current_order + 1)
continue
if (current_order == new_order):
continue
pks.append(pk)
when_statements.append(When(pk=pk, then=Value(new_order, output_field=field)))
case_statement = Case(*when_statements, output_field=field)
if connections[bulk_update_qs.db].features.requires_casted_case_in_updates:
case_statement = Cast(case_statement, output_field=field)
pre_reorder.send(sender=self.__class__, from_order=(first_duplicate['index_order_count'] + 1), to_order=new_order, queryset=bulk_update_qs)
bulk_update_qs.filter(pk__in=pks).update(**{self.sort_order_field: case_statement})
post_reorder.send(sender=self.__class__, from_order=(first_duplicate['index_order_count'] + 1), to_order=new_order, queryset=bulk_update_qs)<|docstring|>Low level function which updates each element to have sequential sort_order values
if the database contains any duplicate values (gaps are ok).<|endoftext|> |
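The bulk update above effectively hands out strictly increasing sort_order values from the first duplicate onward while leaving existing gaps alone. A pure-Python approximation of that intent, not the exact ORM/CASE logic:

def resequence(orders):
    seen = set()
    next_free = 0
    fixed = []
    for value in orders:
        if value in seen or value < next_free:
            value = next_free  # bump duplicates (and anything they displace) upward
        seen.add(value)
        next_free = value + 1
        fixed.append(value)
    return fixed

# resequence([0, 1, 1, 1, 5]) -> [0, 1, 2, 3, 5]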
87472880a7e61a1e0112d4ab35f38e1747e1a82abeaded2240ff5b8703d3b67b | def index_order(self, obj):
'Content for the `index_order` column'
return mark_safe(('<div class="handle icon icon-grip text-replace ui-sortable-handle">%s</div>' % _('Drag'))) | Content for the `index_order` column | wagtailorderable/modeladmin/mixins.py | index_order | kausaltech/wagtail-orderable | 0 | python | def index_order(self, obj):
return mark_safe(('<div class="handle icon icon-grip text-replace ui-sortable-handle">%s</div>' % _('Drag'))) | def index_order(self, obj):
return mark_safe(('<div class="handle icon icon-grip text-replace ui-sortable-handle">%s</div>' % _('Drag')))<|docstring|>Content for the `index_order` column<|endoftext|> |
c3c719bfcf41952c12ef412bcc154150c851d82eb6408e6ff70e4c7e63224b2e | def all_but(candidates: List[int], *used: int) -> List[int]:
'Return items in candidates that are not in used.'
leftovers = set(candidates).difference(used)
if (not leftovers):
raise NoChoices()
return [c for c in candidates if (c in leftovers)] | Return items in candidates that are not in used. | python/kenken.py | all_but | drewcsillag/chooser | 0 | python | def all_but(candidates: List[int], *used: int) -> List[int]:
leftovers = set(candidates).difference(used)
if (not leftovers):
raise NoChoices()
return [c for c in candidates if (c in leftovers)] | def all_but(candidates: List[int], *used: int) -> List[int]:
leftovers = set(candidates).difference(used)
if (not leftovers):
raise NoChoices()
return [c for c in candidates if (c in leftovers)]<|docstring|>Return items in candidates that are not in used.<|endoftext|> |
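Usage sketch for all_but, which preserves candidate order and treats an empty result as a dead end in the search:

# all_but([1, 2, 3, 4], 2, 4) -> [1, 3]
# all_but([1, 2], 1, 2)       -> raises NoChoices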
15daa36e98dab129c93cc27b806209c5a29a58eb75b5c6d2ed58346031a0c078 | def add_choice(row: List[int], c: Chooser, *used: int) -> None:
'Choose a item from [1-4] excluding ones that have been used already)\n and append it to row.'
row.append(c.choose(all_but(ONE_TO_FOUR, *used))) | Choose a item from [1-4] excluding ones that have been used already)
and append it to row. | python/kenken.py | add_choice | drewcsillag/chooser | 0 | python | def add_choice(row: List[int], c: Chooser, *used: int) -> None:
'Choose a item from [1-4] excluding ones that have been used already)\n and append it to row.'
row.append(c.choose(all_but(ONE_TO_FOUR, *used))) | def add_choice(row: List[int], c: Chooser, *used: int) -> None:
'Choose a item from [1-4] excluding ones that have been used already)\n and append it to row.'
row.append(c.choose(all_but(ONE_TO_FOUR, *used)))<|docstring|>Choose a item from [1-4] excluding ones that have been used already)
and append it to row.<|endoftext|> |
825292fa4c7072257a4bcdb38f5a6af04f9184bd0e94b33f63043ca2e52883c5 | @property
def pty(self):
' The :class:`deployer.pseudo_terminal.Pty` of this console. '
return self._pty | The :class:`deployer.pseudo_terminal.Pty` of this console. | deployer/console.py | pty | nikhilrane1992/python-deployer | 39 | python | @property
def pty(self):
' '
return self._pty | @property
def pty(self):
' '
return self._pty<|docstring|>The :class:`deployer.pseudo_terminal.Pty` of this console.<|endoftext|> |
3e7c85b96180ff205ef07f100e246966cdfbe728eaabde5fae6bcd09e67fbcc2 | @property
def is_interactive(self):
"\n When ``False`` don't ask for input and choose the default options when\n possible.\n "
return self._pty.interactive | When ``False`` don't ask for input and choose the default options when
possible. | deployer/console.py | is_interactive | nikhilrane1992/python-deployer | 39 | python | @property
def is_interactive(self):
"\n When ``False`` don't ask for input and choose the default options when\n possible.\n "
return self._pty.interactive | @property
def is_interactive(self):
"\n When ``False`` don't ask for input and choose the default options when\n possible.\n "
return self._pty.interactive<|docstring|>When ``False`` don't ask for input and choose the default options when
possible.<|endoftext|> |
9f6fd69d410f74fe6b7b5a201144b47d929c7f2490b7bcb3ec97e2cec464882f | def input(self, label, is_password=False, answers=None, default=None):
'\n Ask for plain text input. (Similar to raw_input.)\n\n :param is_password: Show stars instead of the actual user input.\n :type is_password: bool\n :param answers: A list of the accepted answers or None.\n :param default: Default answer.\n '
stdin = self._pty.stdin
stdout = self._pty.stdout
def print_question():
answers_str = ((' [%s]' % ','.join(answers)) if answers else '')
default_str = ((' (default=%s)' % default) if (default is not None) else '')
stdout.write(colored((' %s%s%s: ' % (label, answers_str, default_str)), 'cyan'))
stdout.flush()
def read_answer():
value = ''
print_question()
while True:
c = stdin.read(1)
if ((c in ('\r', '\n')) and (value or default)):
stdout.write('\r\n')
break
elif ((c == '\x7f') and value):
stdout.write('\x08 \x08')
value = value[:(- 1)]
elif (ord(c) in range(32, 127)):
stdout.write(colored(('*' if is_password else c), attrs=['bold']))
value += c
elif (c == '\x03'):
raise NoInput
stdout.flush()
if ((not value) and (default is not None)):
return default
else:
return value
with std.raw_mode(stdin):
while True:
if self._pty.interactive:
value = read_answer()
elif (default is not None):
print_question()
stdout.write(('[non interactive] %r\r\n' % default))
stdout.flush()
value = default
else:
value = read_answer()
if ((not answers) or (value in answers)):
return value
else:
stdout.write('Invalid answer.\r\n')
stdout.flush() | Ask for plain text input. (Similar to raw_input.)
:param is_password: Show stars instead of the actual user input.
:type is_password: bool
:param answers: A list of the accepted answers or None.
:param default: Default answer. | deployer/console.py | input | nikhilrane1992/python-deployer | 39 | python | def input(self, label, is_password=False, answers=None, default=None):
'\n Ask for plain text input. (Similar to raw_input.)\n\n :param is_password: Show stars instead of the actual user input.\n :type is_password: bool\n :param answers: A list of the accepted answers or None.\n :param default: Default answer.\n '
stdin = self._pty.stdin
stdout = self._pty.stdout
def print_question():
answers_str = ((' [%s]' % ','.join(answers)) if answers else '')
default_str = ((' (default=%s)' % default) if (default is not None) else '')
stdout.write(colored((' %s%s%s: ' % (label, answers_str, default_str)), 'cyan'))
stdout.flush()
def read_answer():
value = ''
print_question()
while True:
c = stdin.read(1)
if ((c in ('\r', '\n')) and (value or default)):
stdout.write('\r\n')
break
elif ((c == '\x7f') and value):
stdout.write('\x08 \x08')
value = value[:(- 1)]
elif (ord(c) in range(32, 127)):
stdout.write(colored(('*' if is_password else c), attrs=['bold']))
value += c
elif (c == '\x03'):
raise NoInput
stdout.flush()
if ((not value) and (default is not None)):
return default
else:
return value
with std.raw_mode(stdin):
while True:
if self._pty.interactive:
value = read_answer()
elif (default is not None):
print_question()
stdout.write(('[non interactive] %r\r\n' % default))
stdout.flush()
value = default
else:
value = read_answer()
if ((not answers) or (value in answers)):
return value
else:
stdout.write('Invalid answer.\r\n')
stdout.flush() | def input(self, label, is_password=False, answers=None, default=None):
'\n Ask for plain text input. (Similar to raw_input.)\n\n :param is_password: Show stars instead of the actual user input.\n :type is_password: bool\n :param answers: A list of the accepted answers or None.\n :param default: Default answer.\n '
stdin = self._pty.stdin
stdout = self._pty.stdout
def print_question():
answers_str = ((' [%s]' % ','.join(answers)) if answers else '')
default_str = ((' (default=%s)' % default) if (default is not None) else '')
stdout.write(colored((' %s%s%s: ' % (label, answers_str, default_str)), 'cyan'))
stdout.flush()
def read_answer():
value = ''
print_question()
while True:
c = stdin.read(1)
if ((c in ('\r', '\n')) and (value or default)):
stdout.write('\r\n')
break
elif ((c == '\x7f') and value):
stdout.write('\x08 \x08')
value = value[:(- 1)]
elif (ord(c) in range(32, 127)):
stdout.write(colored(('*' if is_password else c), attrs=['bold']))
value += c
elif (c == '\x03'):
raise NoInput
stdout.flush()
if ((not value) and (default is not None)):
return default
else:
return value
with std.raw_mode(stdin):
while True:
if self._pty.interactive:
value = read_answer()
elif (default is not None):
print_question()
stdout.write(('[non interactive] %r\r\n' % default))
stdout.flush()
value = default
else:
value = read_answer()
if ((not answers) or (value in answers)):
return value
else:
stdout.write('Invalid answer.\r\n')
stdout.flush()<|docstring|>Ask for plain text input. (Similar to raw_input.)
:param is_password: Show stars instead of the actual user input.
:type is_password: bool
:param answers: A list of the accepted answers or None.
:param default: Default answer.<|endoftext|> |
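Typical calls, assuming console is an instance of this Console class attached to an interactive Pty:

name = console.input('Deployment name', default='web-01')
password = console.input('Sudo password', is_password=True)
env = console.input('Environment', answers=['staging', 'production'], default='staging')
# In non-interactive mode the default is echoed and returned without blocking on stdin.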
3e39be3ee0f8a5891931c0466de0859bc625fe58e062de5d24926ee9d04b988d | def choice(self, question, options, allow_random=False, default=None):
"\n :param options: List of (name, value) tuples.\n :type options: list\n :param allow_random: If ``True``, the default option becomes 'choose random'.\n :type allow_random: bool\n "
if (len(options) == 0):
raise NoInput('No options given.')
if (allow_random and (default is not None)):
raise Exception("Please don't provide allow_random and default parameter at the same time.")
options = sorted(options, key=(lambda i: i[0]))
while True:
self._pty.stdout.write(colored((' %s\n' % question), 'cyan'))
self.lesspipe((('%10i %s' % ((i + 1), tuple_[0])) for (i, tuple_) in enumerate(options)))
if allow_random:
default = 'random'
elif (default is not None):
try:
default = ([o[1] for o in options].index(default) + 1)
except ValueError:
raise Exception('The default value does not appear in the options list.')
result = self.input(question, default=('random' if allow_random else default))
if (allow_random and (result == 'random')):
return random.choice(options)[1]
else:
try:
result = int(result)
if (1 <= result <= len(options)):
return options[(result - 1)][1]
except ValueError:
pass
self.warning('Invalid input') | :param options: List of (name, value) tuples.
:type options: list
:param allow_random: If ``True``, the default option becomes 'choose random'.
:type allow_random: bool | deployer/console.py | choice | nikhilrane1992/python-deployer | 39 | python | def choice(self, question, options, allow_random=False, default=None):
"\n :param options: List of (name, value) tuples.\n :type options: list\n :param allow_random: If ``True``, the default option becomes 'choose random'.\n :type allow_random: bool\n "
if (len(options) == 0):
raise NoInput('No options given.')
if (allow_random and (default is not None)):
raise Exception("Please don't provide allow_random and default parameter at the same time.")
options = sorted(options, key=(lambda i: i[0]))
while True:
self._pty.stdout.write(colored((' %s\n' % question), 'cyan'))
self.lesspipe((('%10i %s' % ((i + 1), tuple_[0])) for (i, tuple_) in enumerate(options)))
if allow_random:
default = 'random'
elif (default is not None):
try:
default = ([o[1] for o in options].index(default) + 1)
except ValueError:
raise Exception('The default value does not appear in the options list.')
result = self.input(question, default=('random' if allow_random else default))
if (allow_random and (result == 'random')):
return random.choice(options)[1]
else:
try:
result = int(result)
if (1 <= result <= len(options)):
return options[(result - 1)][1]
except ValueError:
pass
self.warning('Invalid input') | def choice(self, question, options, allow_random=False, default=None):
"\n :param options: List of (name, value) tuples.\n :type options: list\n :param allow_random: If ``True``, the default option becomes 'choose random'.\n :type allow_random: bool\n "
if (len(options) == 0):
raise NoInput('No options given.')
if (allow_random and (default is not None)):
raise Exception("Please don't provide allow_random and default parameter at the same time.")
options = sorted(options, key=(lambda i: i[0]))
while True:
self._pty.stdout.write(colored((' %s\n' % question), 'cyan'))
self.lesspipe((('%10i %s' % ((i + 1), tuple_[0])) for (i, tuple_) in enumerate(options)))
if allow_random:
default = 'random'
elif (default is not None):
try:
default = ([o[1] for o in options].index(default) + 1)
except ValueError:
raise Exception('The default value does not appear in the options list.')
result = self.input(question, default=('random' if allow_random else default))
if (allow_random and (result == 'random')):
return random.choice(options)[1]
else:
try:
result = int(result)
if (1 <= result <= len(options)):
return options[(result - 1)][1]
except ValueError:
pass
self.warning('Invalid input')<|docstring|>:param options: List of (name, value) tuples.
:type options: list
:param allow_random: If ``True``, the default option becomes 'choose random'.
:type allow_random: bool<|endoftext|> |
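A small usage sketch, again assuming console is a Console instance; options are (label, value) pairs and the selected value, not the label, is returned:

colour = console.choice('Pick a colour', [('red', '#f00'), ('green', '#0f0')], default='#f00')
host = console.choice('Pick a host', [('web1', 'host-web1'), ('web2', 'host-web2')], allow_random=True)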
01fc1ca29f05a0ddc100c5f15513d0f16883f19e866d53c6e9439cca6e8fff1f | def confirm(self, question, default=None):
"\n Print this yes/no question, and return ``True`` when the user answers\n 'Yes'.\n "
answer = 'invalid'
if (default is not None):
assert isinstance(default, bool)
default = ('y' if default else 'n')
while (answer not in ('yes', 'no', 'y', 'n')):
answer = self.input((question + ' [y/n]'), default=default)
return (answer in ('yes', 'y')) | Print this yes/no question, and return ``True`` when the user answers
'Yes'. | deployer/console.py | confirm | nikhilrane1992/python-deployer | 39 | python | def confirm(self, question, default=None):
"\n Print this yes/no question, and return ``True`` when the user answers\n 'Yes'.\n "
answer = 'invalid'
if (default is not None):
assert isinstance(default, bool)
default = ('y' if default else 'n')
while (answer not in ('yes', 'no', 'y', 'n')):
answer = self.input((question + ' [y/n]'), default=default)
return (answer in ('yes', 'y')) | def confirm(self, question, default=None):
"\n Print this yes/no question, and return ``True`` when the user answers\n 'Yes'.\n "
answer = 'invalid'
if (default is not None):
assert isinstance(default, bool)
default = ('y' if default else 'n')
while (answer not in ('yes', 'no', 'y', 'n')):
answer = self.input((question + ' [y/n]'), default=default)
return (answer in ('yes', 'y'))<|docstring|>Print this yes/no question, and return ``True`` when the user answers
'Yes'.<|endoftext|> |
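Usage sketch, assuming console is a Console instance:

if console.confirm('Restart nginx on all hosts?', default=False):
    restart_all()  # hypothetical follow-up action, not part of the deployer API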
ca954a558acfd423e00b1696c31fbd22bbaf29d3308509f79026e8fa118489cb | def select_node(self, root_node, prompt='Select a node', filter=None):
'\n Show autocompletion for node selection.\n '
from deployer.cli import ExitCLILoop, Handler, HandlerType, CLInterface
class NodeHandler(Handler):
def __init__(self, node):
self.node = node
@property
def is_leaf(self):
return ((not filter) or filter(self.node))
@property
def handler_type(self):
class NodeType(HandlerType):
color = self.node.get_group().color
return NodeType()
def complete_subhandlers(self, part):
for (name, subnode) in self.node.get_subnodes():
if name.startswith(part):
(yield (name, NodeHandler(subnode)))
def get_subhandler(self, name):
if self.node.has_subnode(name):
subnode = self.node.get_subnode(name)
return NodeHandler(subnode)
def __call__(self, context):
raise ExitCLILoop(self.node)
root_handler = NodeHandler(root_node)
class Shell(CLInterface):
@property
def prompt(self):
return colored(('\n%s > ' % prompt), 'cyan')
not_found_message = 'Node not found...'
not_a_leaf_message = 'Not a valid node...'
node_result = Shell(self._pty, root_handler).cmdloop()
if (not node_result):
raise NoInput
return self.select_node_isolation(node_result) | Show autocompletion for node selection. | deployer/console.py | select_node | nikhilrane1992/python-deployer | 39 | python | def select_node(self, root_node, prompt='Select a node', filter=None):
'\n \n '
from deployer.cli import ExitCLILoop, Handler, HandlerType, CLInterface
class NodeHandler(Handler):
def __init__(self, node):
self.node = node
@property
def is_leaf(self):
return ((not filter) or filter(self.node))
@property
def handler_type(self):
class NodeType(HandlerType):
color = self.node.get_group().color
return NodeType()
def complete_subhandlers(self, part):
for (name, subnode) in self.node.get_subnodes():
if name.startswith(part):
(yield (name, NodeHandler(subnode)))
def get_subhandler(self, name):
if self.node.has_subnode(name):
subnode = self.node.get_subnode(name)
return NodeHandler(subnode)
def __call__(self, context):
raise ExitCLILoop(self.node)
root_handler = NodeHandler(root_node)
class Shell(CLInterface):
@property
def prompt(self):
return colored(('\n%s > ' % prompt), 'cyan')
not_found_message = 'Node not found...'
not_a_leaf_message = 'Not a valid node...'
node_result = Shell(self._pty, root_handler).cmdloop()
if (not node_result):
raise NoInput
return self.select_node_isolation(node_result) | def select_node(self, root_node, prompt='Select a node', filter=None):
'\n \n '
from deployer.cli import ExitCLILoop, Handler, HandlerType, CLInterface
class NodeHandler(Handler):
def __init__(self, node):
self.node = node
@property
def is_leaf(self):
return ((not filter) or filter(self.node))
@property
def handler_type(self):
class NodeType(HandlerType):
color = self.node.get_group().color
return NodeType()
def complete_subhandlers(self, part):
for (name, subnode) in self.node.get_subnodes():
if name.startswith(part):
(yield (name, NodeHandler(subnode)))
def get_subhandler(self, name):
if self.node.has_subnode(name):
subnode = self.node.get_subnode(name)
return NodeHandler(subnode)
def __call__(self, context):
raise ExitCLILoop(self.node)
root_handler = NodeHandler(root_node)
class Shell(CLInterface):
@property
def prompt(self):
return colored(('\n%s > ' % prompt), 'cyan')
not_found_message = 'Node not found...'
not_a_leaf_message = 'Not a valid node...'
node_result = Shell(self._pty, root_handler).cmdloop()
if (not node_result):
raise NoInput
return self.select_node_isolation(node_result)<|docstring|>Show autocompletion for node selection.<|endoftext|> |
8928c003a1d65781f01e9e331487ae67ef66fe41ae46b2f9921cbccbe0614a85 | def select_node_isolation(self, node):
'\n Ask for a host, from a list of hosts.\n '
from deployer.inspection import Inspector
from deployer.node import IsolationIdentifierType
options = [(' '.join([('%s (%s)' % (h.slug, h.address)) for h in hosts]), node) for (hosts, node) in Inspector(node).iter_isolations(identifier_type=IsolationIdentifierType.HOST_TUPLES)]
if (len(options) > 1):
return self.choice('Choose a host', options, allow_random=True)
else:
return options[0][1] | Ask for a host, from a list of hosts. | deployer/console.py | select_node_isolation | nikhilrane1992/python-deployer | 39 | python | def select_node_isolation(self, node):
'\n \n '
from deployer.inspection import Inspector
from deployer.node import IsolationIdentifierType
options = [(' '.join([('%s (%s)' % (h.slug, h.address)) for h in hosts]), node) for (hosts, node) in Inspector(node).iter_isolations(identifier_type=IsolationIdentifierType.HOST_TUPLES)]
if (len(options) > 1):
return self.choice('Choose a host', options, allow_random=True)
else:
return options[0][1] | def select_node_isolation(self, node):
'\n \n '
from deployer.inspection import Inspector
from deployer.node import IsolationIdentifierType
options = [(' '.join([('%s (%s)' % (h.slug, h.address)) for h in hosts]), node) for (hosts, node) in Inspector(node).iter_isolations(identifier_type=IsolationIdentifierType.HOST_TUPLES)]
if (len(options) > 1):
return self.choice('Choose a host', options, allow_random=True)
else:
return options[0][1]<|docstring|>Ask for a host, from a list of hosts.<|endoftext|> |
1e93e074e41c5dc34362293bdca960f2076c55dad5bbcfd695d6cd1e5a59062e | def lesspipe(self, line_iterator):
'\n Paginator for output. This will print one page at a time. When the user\n presses a key, the next page is printed. ``Ctrl-c`` or ``q`` will quit\n the paginator.\n\n :param line_iterator: A generator function that yields lines (without\n trailing newline)\n '
stdin = self._pty.stdin
stdout = self._pty.stdout
height = (self._pty.get_size()[0] - 1)
with std.raw_mode(stdin):
lines = 0
for l in line_iterator:
stdout.write(l)
stdout.write('\r\n')
lines += 1
if (lines == height):
stdout.write(colored(' Press enter to continue...', 'cyan'))
stdout.flush()
try:
c = stdin.read(1)
if (c in ('\x03', 'q')):
stdout.write('\r\n')
stdout.flush()
return
except IOError:
pass
stdout.write('\x1b[40D\x1b[K')
lines = 0
stdout.flush() | Paginator for output. This will print one page at a time. When the user
presses a key, the next page is printed. ``Ctrl-c`` or ``q`` will quit
the paginator.
:param line_iterator: A generator function that yields lines (without
trailing newline) | deployer/console.py | lesspipe | nikhilrane1992/python-deployer | 39 | python | def lesspipe(self, line_iterator):
'\n Paginator for output. This will print one page at a time. When the user\n presses a key, the next page is printed. ``Ctrl-c`` or ``q`` will quit\n the paginator.\n\n :param line_iterator: A generator function that yields lines (without\n trailing newline)\n '
stdin = self._pty.stdin
stdout = self._pty.stdout
height = (self._pty.get_size()[0] - 1)
with std.raw_mode(stdin):
lines = 0
for l in line_iterator:
stdout.write(l)
stdout.write('\r\n')
lines += 1
if (lines == height):
stdout.write(colored(' Press enter to continue...', 'cyan'))
stdout.flush()
try:
c = stdin.read(1)
if (c in ('\x03', 'q')):
stdout.write('\r\n')
stdout.flush()
return
except IOError:
pass
stdout.write('\x1b[40D\x1b[K')
lines = 0
stdout.flush() | def lesspipe(self, line_iterator):
'\n Paginator for output. This will print one page at a time. When the user\n presses a key, the next page is printed. ``Ctrl-c`` or ``q`` will quit\n the paginator.\n\n :param line_iterator: A generator function that yields lines (without\n trailing newline)\n '
stdin = self._pty.stdin
stdout = self._pty.stdout
height = (self._pty.get_size()[0] - 1)
with std.raw_mode(stdin):
lines = 0
for l in line_iterator:
stdout.write(l)
stdout.write('\r\n')
lines += 1
if (lines == height):
stdout.write(colored(' Press enter to continue...', 'cyan'))
stdout.flush()
try:
c = stdin.read(1)
if (c in ('\x03', 'q')):
stdout.write('\r\n')
stdout.flush()
return
except IOError:
pass
stdout.write('\x1b[40D\x1b[K')
lines = 0
stdout.flush()<|docstring|>Paginator for output. This will print one page at a time. When the user
presses a key, the next page is printed. ``Ctrl-c`` or ``q`` will quit
the paginator.
:param line_iterator: A generator function that yields lines (without
trailing newline)<|endoftext|> |
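Any iterable of newline-free strings can be paged this way, assuming console is a Console instance; paging stops early when the user presses q or Ctrl-C:

console.lesspipe('line %i' % i for i in range(500))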
280e9e879f72a7c102957e2e608cd32b9fd0bf96ee0e8da30f82c082bf48f08c | def in_columns(self, item_iterator, margin_left=0):
'\n :param item_iterator: An iterable, which yields either ``basestring``\n instances, or (colored_item, length) tuples.\n '
def get_length(item):
return (len(item) if isinstance(item, basestring) else item[1])
def get_text(item):
return (item if isinstance(item, basestring) else item[0])
all_items = list(item_iterator)
if (not all_items):
return
max_length = (max(map(get_length, all_items)) + 1)
term_width = (self._pty.get_size()[1] - margin_left)
words_per_line = max((term_width / max_length), 1)
margin = (' ' * margin_left)
line = [margin]
for (i, j) in enumerate(all_items):
line.append(get_text(j))
if (((i + 1) % words_per_line) == 0):
(yield ''.join(line))
line = [margin]
else:
line.append((' ' * (max_length - get_length(j))))
(yield ''.join(line)) | :param item_iterator: An iterable, which yields either ``basestring``
instances, or (colored_item, length) tuples. | deployer/console.py | in_columns | nikhilrane1992/python-deployer | 39 | python | def in_columns(self, item_iterator, margin_left=0):
'\n :param item_iterator: An iterable, which yields either ``basestring``\n instances, or (colored_item, length) tuples.\n '
def get_length(item):
return (len(item) if isinstance(item, basestring) else item[1])
def get_text(item):
return (item if isinstance(item, basestring) else item[0])
all_items = list(item_iterator)
if (not all_items):
return
max_length = (max(map(get_length, all_items)) + 1)
term_width = (self._pty.get_size()[1] - margin_left)
words_per_line = max((term_width / max_length), 1)
margin = (' ' * margin_left)
line = [margin]
for (i, j) in enumerate(all_items):
line.append(get_text(j))
if (((i + 1) % words_per_line) == 0):
(yield ''.join(line))
line = [margin]
else:
line.append((' ' * (max_length - get_length(j))))
(yield ''.join(line)) | def in_columns(self, item_iterator, margin_left=0):
'\n :param item_iterator: An iterable, which yields either ``basestring``\n instances, or (colored_item, length) tuples.\n '
def get_length(item):
return (len(item) if isinstance(item, basestring) else item[1])
def get_text(item):
return (item if isinstance(item, basestring) else item[0])
all_items = list(item_iterator)
if (not all_items):
return
max_length = (max(map(get_length, all_items)) + 1)
term_width = (self._pty.get_size()[1] - margin_left)
words_per_line = max((term_width / max_length), 1)
margin = (' ' * margin_left)
line = [margin]
for (i, j) in enumerate(all_items):
line.append(get_text(j))
if (((i + 1) % words_per_line) == 0):
(yield ''.join(line))
line = [margin]
else:
line.append((' ' * (max_length - get_length(j))))
(yield ''.join(line))<|docstring|>:param item_iterator: An iterable, which yields either ``basestring``
instances, or (colored_item, length) tuples.<|endoftext|> |
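The layout math above packs items into columns of width max(len(item)) + 1 and lets the terminal width decide how many fit per row. A usage sketch, assuming console is a Console instance:

names = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
for line in console.in_columns(names, margin_left=2):
    print(line)
# Resizing the terminal changes words_per_line, so the same list can render as two, three or more columns.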
e1db62528c18a7a7e767181bcf3500ed9fcc92415e5884afcd9ebb42541160ca | def warning(self, text):
'\n Print a warning.\n '
stdout = self._pty.stdout
stdout.write(colored('*** ', 'yellow'))
stdout.write(colored('WARNING: ', 'red'))
stdout.write(colored(text, 'red', attrs=['bold']))
stdout.write(colored(' ***\n', 'yellow'))
stdout.flush() | Print a warning. | deployer/console.py | warning | nikhilrane1992/python-deployer | 39 | python | def warning(self, text):
'\n \n '
stdout = self._pty.stdout
stdout.write(colored('*** ', 'yellow'))
stdout.write(colored('WARNING: ', 'red'))
stdout.write(colored(text, 'red', attrs=['bold']))
stdout.write(colored(' ***\n', 'yellow'))
stdout.flush() | def warning(self, text):
'\n \n '
stdout = self._pty.stdout
stdout.write(colored('*** ', 'yellow'))
stdout.write(colored('WARNING: ', 'red'))
stdout.write(colored(text, 'red', attrs=['bold']))
stdout.write(colored(' ***\n', 'yellow'))
stdout.flush()<|docstring|>Print a warning.<|endoftext|> |
32611680ae64bc631b77e4a1ee5e8c3b1d7f32b3b54fb8526c55e927b52dc197 | def progress_bar(self, message, expected=None, clear_on_finish=False, format_str=None):
"\n Display a progress bar. This returns a Python context manager.\n Call the next() method to increase the counter.\n\n ::\n\n with console.progress_bar('Looking for nodes') as p:\n for i in range(0, 1000):\n p.next()\n ...\n\n :returns: :class:`ProgressBar` instance.\n :param message: Text label of the progress bar.\n "
return ProgressBar(self._pty, message, expected=expected, clear_on_finish=clear_on_finish, format_str=format_str) | Display a progress bar. This returns a Python context manager.
Call the next() method to increase the counter.
::
with console.progress_bar('Looking for nodes') as p:
for i in range(0, 1000):
p.next()
...
:returns: :class:`ProgressBar` instance.
:param message: Text label of the progress bar. | deployer/console.py | progress_bar | nikhilrane1992/python-deployer | 39 | python | def progress_bar(self, message, expected=None, clear_on_finish=False, format_str=None):
"\n Display a progress bar. This returns a Python context manager.\n Call the next() method to increase the counter.\n\n ::\n\n with console.progress_bar('Looking for nodes') as p:\n for i in range(0, 1000):\n p.next()\n ...\n\n :returns: :class:`ProgressBar` instance.\n :param message: Text label of the progress bar.\n "
return ProgressBar(self._pty, message, expected=expected, clear_on_finish=clear_on_finish, format_str=format_str) | def progress_bar(self, message, expected=None, clear_on_finish=False, format_str=None):
"\n Display a progress bar. This returns a Python context manager.\n Call the next() method to increase the counter.\n\n ::\n\n with console.progress_bar('Looking for nodes') as p:\n for i in range(0, 1000):\n p.next()\n ...\n\n :returns: :class:`ProgressBar` instance.\n :param message: Text label of the progress bar.\n "
return ProgressBar(self._pty, message, expected=expected, clear_on_finish=clear_on_finish, format_str=format_str)<|docstring|>Display a progress bar. This returns a Python context manager.
Call the next() method to increase the counter.
::
with console.progress_bar('Looking for nodes') as p:
for i in range(0, 1000):
p.next()
...
:returns: :class:`ProgressBar` instance.
:param message: Text label of the progress bar.<|endoftext|> |
943021c75eadd70864d0b13009f8b8015d9776aeb396174c405bae47a8933a7f | def progress_bar_with_steps(self, message, steps, format_str=None):
'\n Display a progress bar with steps.\n\n ::\n\n steps = ProgressBarSteps({\n 1: "Resolving address",\n 2: "Create transport",\n 3: "Get remote key",\n 4: "Authenticating" })\n\n with console.progress_bar_with_steps(\'Connecting to SSH server\', steps=steps) as p:\n ...\n p.set_progress(1)\n ...\n p.set_progress(2)\n ...\n\n :param steps: :class:`ProgressBarSteps` instance.\n :param message: Text label of the progress bar.\n '
return ProgressBar(self._pty, message, steps=steps, format_str=format_str) | Display a progress bar with steps.
::
steps = ProgressBarSteps({
1: "Resolving address",
2: "Create transport",
3: "Get remote key",
4: "Authenticating" })
with console.progress_bar_with_steps('Connecting to SSH server', steps=steps) as p:
...
p.set_progress(1)
...
p.set_progress(2)
...
:param steps: :class:`ProgressBarSteps` instance.
:param message: Text label of the progress bar. | deployer/console.py | progress_bar_with_steps | nikhilrane1992/python-deployer | 39 | python | def progress_bar_with_steps(self, message, steps, format_str=None):
'\n Display a progress bar with steps.\n\n ::\n\n steps = ProgressBarSteps({\n 1: "Resolving address",\n 2: "Create transport",\n 3: "Get remote key",\n 4: "Authenticating" })\n\n with console.progress_bar_with_steps(\'Connecting to SSH server\', steps=steps) as p:\n ...\n p.set_progress(1)\n ...\n p.set_progress(2)\n ...\n\n :param steps: :class:`ProgressBarSteps` instance.\n :param message: Text label of the progress bar.\n '
return ProgressBar(self._pty, message, steps=steps, format_str=format_str) | def progress_bar_with_steps(self, message, steps, format_str=None):
'\n Display a progress bar with steps.\n\n ::\n\n steps = ProgressBarSteps({\n 1: "Resolving address",\n 2: "Create transport",\n 3: "Get remote key",\n 4: "Authenticating" })\n\n with console.progress_bar_with_steps(\'Connecting to SSH server\', steps=steps) as p:\n ...\n p.set_progress(1)\n ...\n p.set_progress(2)\n ...\n\n :param steps: :class:`ProgressBarSteps` instance.\n :param message: Text label of the progress bar.\n '
return ProgressBar(self._pty, message, steps=steps, format_str=format_str)<|docstring|>Display a progress bar with steps.
::
steps = ProgressBarSteps({
1: "Resolving address",
2: "Create transport",
3: "Get remote key",
4: "Authenticating" })
with console.progress_bar_with_steps('Connecting to SSH server', steps=steps) as p:
...
p.set_progress(1)
...
p.set_progress(2)
...
:param steps: :class:`ProgressBarSteps` instance.
:param message: Text label of the progress bar.<|endoftext|> |
9578c27d84ffa11e5ef3de644431c9888ac4f8afa2f80bf2f73d059a96eedab1 | def next(self):
'\n Increment progress bar counter.\n '
self.set_progress((self.counter + 1), rewrite=False) | Increment progress bar counter. | deployer/console.py | next | nikhilrane1992/python-deployer | 39 | python | def next(self):
'\n \n '
self.set_progress((self.counter + 1), rewrite=False) | def next(self):
'\n \n '
self.set_progress((self.counter + 1), rewrite=False)<|docstring|>Increment progress bar counter.<|endoftext|> |
613234cfbdd1208273432f608763be8c839fb79c307838675be4fc965864d18b | def set_progress(self, value, rewrite=True):
'\n Set counter to this value.\n\n :param rewrite: Always redraw the progress bar.\n :type rewrite: bool\n '
self.counter = value
delta = (((datetime.now() - self._last_print).microseconds / 1000) / 1000.0)
if (rewrite or (delta > self.interval)):
self._print()
self._last_print = datetime.now() | Set counter to this value.
:param rewrite: Always redraw the progress bar.
:type rewrite: bool | deployer/console.py | set_progress | nikhilrane1992/python-deployer | 39 | python | def set_progress(self, value, rewrite=True):
'\n Set counter to this value.\n\n :param rewrite: Always redraw the progress bar.\n :type rewrite: bool\n '
self.counter = value
delta = (((datetime.now() - self._last_print).microseconds / 1000) / 1000.0)
if (rewrite or (delta > self.interval)):
self._print()
self._last_print = datetime.now() | def set_progress(self, value, rewrite=True):
'\n Set counter to this value.\n\n :param rewrite: Always redraw the progress bar.\n :type rewrite: bool\n '
self.counter = value
delta = (((datetime.now() - self._last_print).microseconds / 1000) / 1000.0)
if (rewrite or (delta > self.interval)):
self._print()
self._last_print = datetime.now()<|docstring|>Set counter to this value.
:param rewrite: Always redraw the progress bar.
:type rewrite: bool<|endoftext|> |
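Read together with progress_bar() above: next() only bumps the counter, while set_progress() redraws at most once per `interval` seconds unless rewrite=True forces an immediate redraw. A hedged usage sketch follows; `console` is assumed to be a Console bound to a Pty, and the file list and sleep are stand-ins, not part of the library.

import time

def copy_files(console, files):
    # context-manager form, as in the progress_bar() docstring above
    with console.progress_bar('Copying files', expected=len(files)) as p:
        for f in files:
            time.sleep(0.01)                          # stand-in for the real work
            p.next()                                  # throttled redraw
        p.set_progress(len(files), rewrite=True)      # force one final redraw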
cbf462601754f46041265c3833c14a8a823b6f5cb8d3f4af609875f2a02fa2fe | @expectedFlakeyLinux('llvm.org/pr19310')
@expectedFailureFreeBSD('llvm.org/pr19310')
@skipIfRemote
@dwarf_test
def test_attach_continue_interrupt_detach(self):
'Test attach/continue/interrupt/detach'
self.buildDwarf()
self.process_attach_continue_interrupt_detach() | Test attach/continue/interrupt/detach | 3.7.0/lldb-3.7.0.src/test/functionalities/attach_resume/TestAttachResume.py | test_attach_continue_interrupt_detach | androm3da/clang_sles | 3 | python | @expectedFlakeyLinux('llvm.org/pr19310')
@expectedFailureFreeBSD('llvm.org/pr19310')
@skipIfRemote
@dwarf_test
def test_attach_continue_interrupt_detach(self):
self.buildDwarf()
self.process_attach_continue_interrupt_detach() | @expectedFlakeyLinux('llvm.org/pr19310')
@expectedFailureFreeBSD('llvm.org/pr19310')
@skipIfRemote
@dwarf_test
def test_attach_continue_interrupt_detach(self):
self.buildDwarf()
self.process_attach_continue_interrupt_detach()<|docstring|>Test attach/continue/interrupt/detach<|endoftext|> |
e4f25eb652f4257762914a4034286d0027966d707074ac20d6e668a8613dee26 | @expectedFlakeyLinux('llvm.org/pr19478')
@skipIfRemote
def process_attach_continue_interrupt_detach(self):
'Test attach/continue/interrupt/detach'
exe = os.path.join(os.getcwd(), exe_name)
popen = self.spawnSubprocess(exe)
self.addTearDownHook(self.cleanupSubprocesses)
self.runCmd(('process attach -p ' + str(popen.pid)))
self._state = 0
def process_events():
event = lldb.SBEvent()
while self.dbg.GetListener().GetNextEvent(event):
self._state = lldb.SBProcess.GetStateFromEvent(event)
def wait_for_state(s, timeout=5):
t = 0
period = 0.1
while (self._state != s):
process_events()
time.sleep(period)
t += period
if (t > timeout):
return False
return True
self.setAsync(True)
self.runCmd('c')
self.assertTrue(wait_for_state(lldb.eStateRunning), 'Process not running after continue')
self.runCmd('process interrupt')
self.assertTrue(wait_for_state(lldb.eStateStopped), 'Process not stopped after interrupt')
self.runCmd('c')
self.assertTrue(wait_for_state(lldb.eStateRunning), 'Process not running after continue')
self.runCmd('process interrupt')
self.assertTrue(wait_for_state(lldb.eStateStopped), 'Process not stopped after interrupt')
self.runCmd(('br set -f main.cpp -l %u' % line_number('main.cpp', '// Set breakpoint here')))
self.runCmd('c')
self.assertTrue(wait_for_state(lldb.eStateRunning), 'Process not running after continue')
self.assertTrue(wait_for_state(lldb.eStateStopped), 'Process not stopped after breakpoint')
self.expect('br list', 'Breakpoint not hit', patterns=['hit count = [1-9]'])
self.runCmd('c')
self.assertTrue(wait_for_state(lldb.eStateRunning), 'Process not running after continue')
self.runCmd('detach')
self.assertTrue(wait_for_state(lldb.eStateDetached), 'Process not detached after detach') | Test attach/continue/interrupt/detach | 3.7.0/lldb-3.7.0.src/test/functionalities/attach_resume/TestAttachResume.py | process_attach_continue_interrupt_detach | androm3da/clang_sles | 3 | python | @expectedFlakeyLinux('llvm.org/pr19478')
@skipIfRemote
def process_attach_continue_interrupt_detach(self):
exe = os.path.join(os.getcwd(), exe_name)
popen = self.spawnSubprocess(exe)
self.addTearDownHook(self.cleanupSubprocesses)
self.runCmd(('process attach -p ' + str(popen.pid)))
self._state = 0
def process_events():
event = lldb.SBEvent()
while self.dbg.GetListener().GetNextEvent(event):
self._state = lldb.SBProcess.GetStateFromEvent(event)
def wait_for_state(s, timeout=5):
t = 0
period = 0.1
while (self._state != s):
process_events()
time.sleep(period)
t += period
if (t > timeout):
return False
return True
self.setAsync(True)
self.runCmd('c')
self.assertTrue(wait_for_state(lldb.eStateRunning), 'Process not running after continue')
self.runCmd('process interrupt')
self.assertTrue(wait_for_state(lldb.eStateStopped), 'Process not stopped after interrupt')
self.runCmd('c')
self.assertTrue(wait_for_state(lldb.eStateRunning), 'Process not running after continue')
self.runCmd('process interrupt')
self.assertTrue(wait_for_state(lldb.eStateStopped), 'Process not stopped after interrupt')
self.runCmd(('br set -f main.cpp -l %u' % line_number('main.cpp', '// Set breakpoint here')))
self.runCmd('c')
self.assertTrue(wait_for_state(lldb.eStateRunning), 'Process not running after continue')
self.assertTrue(wait_for_state(lldb.eStateStopped), 'Process not stopped after breakpoint')
self.expect('br list', 'Breakpoint not hit', patterns=['hit count = [1-9]'])
self.runCmd('c')
self.assertTrue(wait_for_state(lldb.eStateRunning), 'Process not running after continue')
self.runCmd('detach')
self.assertTrue(wait_for_state(lldb.eStateDetached), 'Process not detached after detach') | @expectedFlakeyLinux('llvm.org/pr19478')
@skipIfRemote
def process_attach_continue_interrupt_detach(self):
exe = os.path.join(os.getcwd(), exe_name)
popen = self.spawnSubprocess(exe)
self.addTearDownHook(self.cleanupSubprocesses)
self.runCmd(('process attach -p ' + str(popen.pid)))
self._state = 0
def process_events():
event = lldb.SBEvent()
while self.dbg.GetListener().GetNextEvent(event):
self._state = lldb.SBProcess.GetStateFromEvent(event)
def wait_for_state(s, timeout=5):
t = 0
period = 0.1
while (self._state != s):
process_events()
time.sleep(period)
t += period
if (t > timeout):
return False
return True
self.setAsync(True)
self.runCmd('c')
self.assertTrue(wait_for_state(lldb.eStateRunning), 'Process not running after continue')
self.runCmd('process interrupt')
self.assertTrue(wait_for_state(lldb.eStateStopped), 'Process not stopped after interrupt')
self.runCmd('c')
self.assertTrue(wait_for_state(lldb.eStateRunning), 'Process not running after continue')
self.runCmd('process interrupt')
self.assertTrue(wait_for_state(lldb.eStateStopped), 'Process not stopped after interrupt')
self.runCmd(('br set -f main.cpp -l %u' % line_number('main.cpp', '// Set breakpoint here')))
self.runCmd('c')
self.assertTrue(wait_for_state(lldb.eStateRunning), 'Process not running after continue')
self.assertTrue(wait_for_state(lldb.eStateStopped), 'Process not stopped after breakpoint')
self.expect('br list', 'Breakpoint not hit', patterns=['hit count = [1-9]'])
self.runCmd('c')
self.assertTrue(wait_for_state(lldb.eStateRunning), 'Process not running after continue')
self.runCmd('detach')
self.assertTrue(wait_for_state(lldb.eStateDetached), 'Process not detached after detach')<|docstring|>Test attach/continue/interrupt/detach<|endoftext|> |
eda25b0741b257ef1b45142f7211967bc15f2f3fe5134e6577cf5d0fdfce4908 | def read(*paths):
'Build a file path from *paths* and return the contents.'
with open(os.path.join(*paths), 'r') as f:
return f.read() | Build a file path from *paths* and return the contents. | setup.py | read | tmr232/rage | 0 | python | def read(*paths):
with open(os.path.join(*paths), 'r') as f:
return f.read() | def read(*paths):
with open(os.path.join(*paths), 'r') as f:
return f.read()<|docstring|>Build a file path from *paths* and return the contents.<|endoftext|> |
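The usual reason for a helper like this in a setup.py is feeding file contents into setuptools metadata. A short hedged sketch, assuming read() from this entry is in scope and that a README.rst sits next to setup.py (both are assumptions, not taken from the repository):

import os

here = os.path.abspath(os.path.dirname(__file__))
# read() os.path.join()s its arguments and returns the file contents as text
long_description = read(here, 'README.rst')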
5de948b68a571a37544dca591707dca99e050882fca83d7484a26f31e4a1a975 | def format_eval_input_for_neg(user_item_dict, neg_user_list):
'Eval input data for negative samples'
user_list = []
item_list = []
label_list = []
for user_id in neg_user_list:
cur_item_list = user_item_dict[user_id]
l_cur_item_list = len(cur_item_list)
for i in range(l_cur_item_list):
user_list.append(user_id)
item_list.append(cur_item_list[i])
label_list.append(0)
return (dsnp.reshape(Tensor(user_list), ((- 1), 1, 1)), dsnp.reshape(Tensor(item_list), ((- 1), 1, 1)), dsnp.reshape(Tensor(label_list), ((- 1), 1, 1))) | Eval input data for negative samples | papers/DiffNet++/eval.py | format_eval_input_for_neg | mindspore-ai/contrib | 2 | python | def format_eval_input_for_neg(user_item_dict, neg_user_list):
user_list = []
item_list = []
label_list = []
for user_id in neg_user_list:
cur_item_list = user_item_dict[user_id]
l_cur_item_list = len(cur_item_list)
for i in range(l_cur_item_list):
user_list.append(user_id)
item_list.append(cur_item_list[i])
label_list.append(0)
return (dsnp.reshape(Tensor(user_list), ((- 1), 1, 1)), dsnp.reshape(Tensor(item_list), ((- 1), 1, 1)), dsnp.reshape(Tensor(label_list), ((- 1), 1, 1))) | def format_eval_input_for_neg(user_item_dict, neg_user_list):
user_list = []
item_list = []
label_list = []
for user_id in neg_user_list:
cur_item_list = user_item_dict[user_id]
l_cur_item_list = len(cur_item_list)
for i in range(l_cur_item_list):
user_list.append(user_id)
item_list.append(cur_item_list[i])
label_list.append(0)
return (dsnp.reshape(Tensor(user_list), ((- 1), 1, 1)), dsnp.reshape(Tensor(item_list), ((- 1), 1, 1)), dsnp.reshape(Tensor(label_list), ((- 1), 1, 1)))<|docstring|>Eval input data for negative samples<|endoftext|> |
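The reshape to (-1, 1, 1) flattens every (user, item, label=0) triple into a column of singleton entries. The same bookkeeping with plain numpy, swapping mindspore's Tensor and dsnp.reshape for numpy purely for illustration (the dictionary below is made up):

import numpy as np

user_item_dict = {7: [101, 102], 9: [205]}      # made-up negative samples per user
users, items = [], []
for u in [7, 9]:
    for it in user_item_dict[u]:
        users.append(u)
        items.append(it)

users = np.reshape(users, (-1, 1, 1))
items = np.reshape(items, (-1, 1, 1))
labels = np.zeros_like(users)                    # negatives carry label 0
print(users.shape, items.shape, labels.shape)    # (3, 1, 1) each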
c646d2caf41ea9c2a68d742face4917597ef455f3c37bae008a6254d06bfef7f | def get_hr_ndcg(index_dict, pos_prediction_input, neg_prediction_input, topk):
'Get Hit HR and NDCG results'
hr_list = []
ndcg_list = []
for idx in range(len(eval_user_list)):
user = eval_user_list[idx]
cur_user_pos_prediction = pos_prediction_input.asnumpy()[index_dict[user]]
cur_user_neg_prediction = neg_prediction_input.asnumpy()[idx]
positive_length = len(cur_user_pos_prediction)
target_length = min(topk, positive_length)
total_prediction = np.concatenate([cur_user_pos_prediction, cur_user_neg_prediction])
sort_index = np.argsort(total_prediction)
sort_index = sort_index[::(- 1)]
user_hr_list = []
user_ndcg_list = []
for i in range(topk):
ranking = sort_index[i]
if (ranking < positive_length):
user_hr_list.append(Metrics.Gethr())
user_ndcg_list.append(Metrics.Getdcg(i))
idcg = Metrics.Getidcg(target_length)
tmp_hr = (np.sum(user_hr_list) / target_length)
tmp_ndcg = (np.sum(user_ndcg_list) / idcg)
hr_list.append(tmp_hr)
ndcg_list.append(tmp_ndcg)
return (np.mean(hr_list), np.mean(ndcg_list)) | Get Hit HR and NDCG results | papers/DiffNet++/eval.py | get_hr_ndcg | mindspore-ai/contrib | 2 | python | def get_hr_ndcg(index_dict, pos_prediction_input, neg_prediction_input, topk):
hr_list = []
ndcg_list = []
for idx in range(len(eval_user_list)):
user = eval_user_list[idx]
cur_user_pos_prediction = pos_prediction_input.asnumpy()[index_dict[user]]
cur_user_neg_prediction = neg_prediction_input.asnumpy()[idx]
positive_length = len(cur_user_pos_prediction)
target_length = min(topk, positive_length)
total_prediction = np.concatenate([cur_user_pos_prediction, cur_user_neg_prediction])
sort_index = np.argsort(total_prediction)
sort_index = sort_index[::(- 1)]
user_hr_list = []
user_ndcg_list = []
for i in range(topk):
ranking = sort_index[i]
if (ranking < positive_length):
user_hr_list.append(Metrics.Gethr())
user_ndcg_list.append(Metrics.Getdcg(i))
idcg = Metrics.Getidcg(target_length)
tmp_hr = (np.sum(user_hr_list) / target_length)
tmp_ndcg = (np.sum(user_ndcg_list) / idcg)
hr_list.append(tmp_hr)
ndcg_list.append(tmp_ndcg)
return (np.mean(hr_list), np.mean(ndcg_list)) | def get_hr_ndcg(index_dict, pos_prediction_input, neg_prediction_input, topk):
hr_list = []
ndcg_list = []
for idx in range(len(eval_user_list)):
user = eval_user_list[idx]
cur_user_pos_prediction = pos_prediction_input.asnumpy()[index_dict[user]]
cur_user_neg_prediction = neg_prediction_input.asnumpy()[idx]
positive_length = len(cur_user_pos_prediction)
target_length = min(topk, positive_length)
total_prediction = np.concatenate([cur_user_pos_prediction, cur_user_neg_prediction])
sort_index = np.argsort(total_prediction)
sort_index = sort_index[::(- 1)]
user_hr_list = []
user_ndcg_list = []
for i in range(topk):
ranking = sort_index[i]
if (ranking < positive_length):
user_hr_list.append(Metrics.Gethr())
user_ndcg_list.append(Metrics.Getdcg(i))
idcg = Metrics.Getidcg(target_length)
tmp_hr = (np.sum(user_hr_list) / target_length)
tmp_ndcg = (np.sum(user_ndcg_list) / idcg)
hr_list.append(tmp_hr)
ndcg_list.append(tmp_ndcg)
return (np.mean(hr_list), np.mean(ndcg_list))<|docstring|>Get Hit HR and NDCG results<|endoftext|> |
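For a single user the loop above reduces to HR@K (how many of that user's held-out items land in the top K, divided by min(K, number of positives)) and NDCG@K (log2-discounted gain of the hits over the ideal ordering). A self-contained sketch with the formulas written out, assuming Metrics.Getdcg/Getidcg implement the usual 1/log2(rank+2) discounting:

import numpy as np

def hr_ndcg_at_k(pos_scores, neg_scores, k):
    # pos_scores: model scores of the user's held-out positive items
    # neg_scores: model scores of sampled negative items
    scores = np.concatenate([pos_scores, neg_scores])
    order = np.argsort(scores)[::-1]               # best first
    hits = order[:k] < len(pos_scores)             # True where a positive landed in the top k
    target = min(k, len(pos_scores))
    hr = hits.sum() / target
    dcg = sum(1.0 / np.log2(rank + 2) for rank, hit in enumerate(hits) if hit)
    idcg = sum(1.0 / np.log2(i + 2) for i in range(target))
    return hr, dcg / idcg

print(hr_ndcg_at_k(np.array([0.9, 0.4]), np.array([0.8, 0.1, 0.05]), k=3))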
e963a051f138f5219d02a94742eeecc77525a3d4c58334ac7436133a4d586c1e | def __init__(self, env: gym.Env, env_info: ConfigDict, hyper_params: ConfigDict, learner_cfg: ConfigDict, log_cfg: ConfigDict, is_test: bool, load_from: str, is_render: bool, render_after: int, is_log: bool, save_period: int, episode_num: int, max_episode_steps: int, interim_test_num: int):
'Initialize.'
Agent.__init__(self, env, env_info, log_cfg, is_test, load_from, is_render, render_after, is_log, save_period, episode_num, max_episode_steps, interim_test_num)
self.curr_state = np.zeros((1,))
self.total_step = 0
self.episode_step = 0
self.i_episode = 0
self.hyper_params = hyper_params
self.learner_cfg = learner_cfg
self._initialize() | Initialize. | rl_algorithms/sac/agent.py | __init__ | medipixel/rl_algorithms | 466 | python | def __init__(self, env: gym.Env, env_info: ConfigDict, hyper_params: ConfigDict, learner_cfg: ConfigDict, log_cfg: ConfigDict, is_test: bool, load_from: str, is_render: bool, render_after: int, is_log: bool, save_period: int, episode_num: int, max_episode_steps: int, interim_test_num: int):
Agent.__init__(self, env, env_info, log_cfg, is_test, load_from, is_render, render_after, is_log, save_period, episode_num, max_episode_steps, interim_test_num)
self.curr_state = np.zeros((1,))
self.total_step = 0
self.episode_step = 0
self.i_episode = 0
self.hyper_params = hyper_params
self.learner_cfg = learner_cfg
self._initialize() | def __init__(self, env: gym.Env, env_info: ConfigDict, hyper_params: ConfigDict, learner_cfg: ConfigDict, log_cfg: ConfigDict, is_test: bool, load_from: str, is_render: bool, render_after: int, is_log: bool, save_period: int, episode_num: int, max_episode_steps: int, interim_test_num: int):
Agent.__init__(self, env, env_info, log_cfg, is_test, load_from, is_render, render_after, is_log, save_period, episode_num, max_episode_steps, interim_test_num)
self.curr_state = np.zeros((1,))
self.total_step = 0
self.episode_step = 0
self.i_episode = 0
self.hyper_params = hyper_params
self.learner_cfg = learner_cfg
self._initialize()<|docstring|>Initialize.<|endoftext|> |
96b8933dc2cd7e948a0ae77577341fd71d199feccfafc74f4e5787af76bc454d | def _initialize(self):
'Initialize non-common things.'
if (not self.is_test):
self.memory = ReplayBuffer(self.hyper_params.buffer_size, self.hyper_params.batch_size)
build_args = dict(hyper_params=self.hyper_params, log_cfg=self.log_cfg, env_name=self.env_info.name, state_size=self.env_info.observation_space.shape, output_size=self.env_info.action_space.shape[0], is_test=self.is_test, load_from=self.load_from)
self.learner = build_learner(self.learner_cfg, build_args) | Initialize non-common things. | rl_algorithms/sac/agent.py | _initialize | medipixel/rl_algorithms | 466 | python | def _initialize(self):
if (not self.is_test):
self.memory = ReplayBuffer(self.hyper_params.buffer_size, self.hyper_params.batch_size)
build_args = dict(hyper_params=self.hyper_params, log_cfg=self.log_cfg, env_name=self.env_info.name, state_size=self.env_info.observation_space.shape, output_size=self.env_info.action_space.shape[0], is_test=self.is_test, load_from=self.load_from)
self.learner = build_learner(self.learner_cfg, build_args) | def _initialize(self):
if (not self.is_test):
self.memory = ReplayBuffer(self.hyper_params.buffer_size, self.hyper_params.batch_size)
build_args = dict(hyper_params=self.hyper_params, log_cfg=self.log_cfg, env_name=self.env_info.name, state_size=self.env_info.observation_space.shape, output_size=self.env_info.action_space.shape[0], is_test=self.is_test, load_from=self.load_from)
self.learner = build_learner(self.learner_cfg, build_args)<|docstring|>Initialize non-common things.<|endoftext|> |
456f6b36428f25a3f20c97051c0af04a6c4a1bad7f56b00d11b07c64a68ed675 | def select_action(self, state: np.ndarray) -> np.ndarray:
'Select an action from the input space.'
self.curr_state = state
state = self._preprocess_state(state)
if ((self.total_step < self.hyper_params.initial_random_action) and (not self.is_test)):
return np.array(self.env.action_space.sample())
with torch.no_grad():
if self.is_test:
(_, _, _, selected_action, _) = self.learner.actor(state)
else:
(selected_action, _, _, _, _) = self.learner.actor(state)
return selected_action.detach().cpu().numpy() | Select an action from the input space. | rl_algorithms/sac/agent.py | select_action | medipixel/rl_algorithms | 466 | python | def select_action(self, state: np.ndarray) -> np.ndarray:
self.curr_state = state
state = self._preprocess_state(state)
if ((self.total_step < self.hyper_params.initial_random_action) and (not self.is_test)):
return np.array(self.env.action_space.sample())
with torch.no_grad():
if self.is_test:
(_, _, _, selected_action, _) = self.learner.actor(state)
else:
(selected_action, _, _, _, _) = self.learner.actor(state)
return selected_action.detach().cpu().numpy() | def select_action(self, state: np.ndarray) -> np.ndarray:
self.curr_state = state
state = self._preprocess_state(state)
if ((self.total_step < self.hyper_params.initial_random_action) and (not self.is_test)):
return np.array(self.env.action_space.sample())
with torch.no_grad():
if self.is_test:
(_, _, _, selected_action, _) = self.learner.actor(state)
else:
(selected_action, _, _, _, _) = self.learner.actor(state)
return selected_action.detach().cpu().numpy()<|docstring|>Select an action from the input space.<|endoftext|> |
5352d92f1a55278bfb3cacd123e57e84c1b7c688ceb4592c8d7d56726842d5f6 | def _preprocess_state(self, state: np.ndarray) -> torch.Tensor:
'Preprocess state so that actor selects an action.'
state = numpy2floattensor(state, self.learner.device)
return state | Preprocess state so that actor selects an action. | rl_algorithms/sac/agent.py | _preprocess_state | medipixel/rl_algorithms | 466 | python | def _preprocess_state(self, state: np.ndarray) -> torch.Tensor:
state = numpy2floattensor(state, self.learner.device)
return state | def _preprocess_state(self, state: np.ndarray) -> torch.Tensor:
state = numpy2floattensor(state, self.learner.device)
return state<|docstring|>Preprocess state so that actor selects an action.<|endoftext|> |
2d51bf532ab79d8a5dfebc39f654c4ee249c974ad2cc9f2aadd53cddb88c9118 | def step(self, action: np.ndarray) -> Tuple[(np.ndarray, np.float64, bool, dict)]:
'Take an action and return the response of the env.'
(next_state, reward, done, info) = self.env.step(action)
if (not self.is_test):
done_bool = (False if (self.episode_step == self.max_episode_steps) else done)
transition = (self.curr_state, action, reward, next_state, done_bool)
self._add_transition_to_memory(transition)
return (next_state, reward, done, info) | Take an action and return the response of the env. | rl_algorithms/sac/agent.py | step | medipixel/rl_algorithms | 466 | python | def step(self, action: np.ndarray) -> Tuple[(np.ndarray, np.float64, bool, dict)]:
(next_state, reward, done, info) = self.env.step(action)
if (not self.is_test):
done_bool = (False if (self.episode_step == self.max_episode_steps) else done)
transition = (self.curr_state, action, reward, next_state, done_bool)
self._add_transition_to_memory(transition)
return (next_state, reward, done, info) | def step(self, action: np.ndarray) -> Tuple[(np.ndarray, np.float64, bool, dict)]:
(next_state, reward, done, info) = self.env.step(action)
if (not self.is_test):
done_bool = (False if (self.episode_step == self.max_episode_steps) else done)
transition = (self.curr_state, action, reward, next_state, done_bool)
self._add_transition_to_memory(transition)
return (next_state, reward, done, info)<|docstring|>Take an action and return the response of the env.<|endoftext|> |
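The done_bool masking above is the standard time-limit trick: an episode cut off at max_episode_steps is stored as not done, so the critic still bootstraps from the next state, while true terminal states stop the bootstrap. In target-value terms (a sketch, not the learner's actual update code):

import numpy as np

gamma = 0.99
rewards   = np.array([1.0, 0.5])
next_vals = np.array([10.0, 12.0])      # V(s') from the critic
done_bool = np.array([0.0, 1.0])        # 1 only for true terminal states, 0 for time-limit cutoffs

targets = rewards + gamma * (1.0 - done_bool) * next_vals
print(targets)   # the second transition stops bootstrapping, the first keeps it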
b20c1b8a71e34f4b76f54985c26f43f0a638f622756b7fa1e0dae32e0bafeaf5 | def _add_transition_to_memory(self, transition: Tuple[(np.ndarray, ...)]):
'Add 1 step and n step transitions to memory.'
self.memory.add(transition) | Add 1 step and n step transitions to memory. | rl_algorithms/sac/agent.py | _add_transition_to_memory | medipixel/rl_algorithms | 466 | python | def _add_transition_to_memory(self, transition: Tuple[(np.ndarray, ...)]):
self.memory.add(transition) | def _add_transition_to_memory(self, transition: Tuple[(np.ndarray, ...)]):
self.memory.add(transition)<|docstring|>Add 1 step and n step transitions to memory.<|endoftext|> |
ae9f4e3a7f353bc1e8354dccadb0644c163c10adee16bddabb48e7155a4bb205 | def write_log(self, log_value: tuple):
'Write log about loss and score'
(i, loss, score, policy_update_freq, avg_time_cost) = log_value
total_loss = loss.sum()
print(('[INFO] episode %d, episode_step %d, total step %d, total score: %d\ntotal loss: %.3f actor_loss: %.3f qf_1_loss: %.3f qf_2_loss: %.3f vf_loss: %.3f alpha_loss: %.3f (spent %.6f sec/step)\n' % (i, self.episode_step, self.total_step, score, total_loss, (loss[0] * policy_update_freq), loss[1], loss[2], loss[3], loss[4], avg_time_cost)))
if self.is_log:
wandb.log({'score': score, 'total loss': total_loss, 'actor loss': (loss[0] * policy_update_freq), 'qf_1 loss': loss[1], 'qf_2 loss': loss[2], 'vf loss': loss[3], 'alpha loss': loss[4], 'time per each step': avg_time_cost}) | Write log about loss and score | rl_algorithms/sac/agent.py | write_log | medipixel/rl_algorithms | 466 | python | def write_log(self, log_value: tuple):
(i, loss, score, policy_update_freq, avg_time_cost) = log_value
total_loss = loss.sum()
print(('[INFO] episode %d, episode_step %d, total step %d, total score: %d\ntotal loss: %.3f actor_loss: %.3f qf_1_loss: %.3f qf_2_loss: %.3f vf_loss: %.3f alpha_loss: %.3f (spent %.6f sec/step)\n' % (i, self.episode_step, self.total_step, score, total_loss, (loss[0] * policy_update_freq), loss[1], loss[2], loss[3], loss[4], avg_time_cost)))
if self.is_log:
wandb.log({'score': score, 'total loss': total_loss, 'actor loss': (loss[0] * policy_update_freq), 'qf_1 loss': loss[1], 'qf_2 loss': loss[2], 'vf loss': loss[3], 'alpha loss': loss[4], 'time per each step': avg_time_cost}) | def write_log(self, log_value: tuple):
(i, loss, score, policy_update_freq, avg_time_cost) = log_value
total_loss = loss.sum()
print(('[INFO] episode %d, episode_step %d, total step %d, total score: %d\ntotal loss: %.3f actor_loss: %.3f qf_1_loss: %.3f qf_2_loss: %.3f vf_loss: %.3f alpha_loss: %.3f (spent %.6f sec/step)\n' % (i, self.episode_step, self.total_step, score, total_loss, (loss[0] * policy_update_freq), loss[1], loss[2], loss[3], loss[4], avg_time_cost)))
if self.is_log:
wandb.log({'score': score, 'total loss': total_loss, 'actor loss': (loss[0] * policy_update_freq), 'qf_1 loss': loss[1], 'qf_2 loss': loss[2], 'vf loss': loss[3], 'alpha loss': loss[4], 'time per each step': avg_time_cost})<|docstring|>Write log about loss and score<|endoftext|> |
6bb5763ea77c03354ec87c6ab391d97d32a0c8b8a9b39926249cb8e12c1e76d8 | def pretrain(self):
'Pretraining steps.'
pass | Pretraining steps. | rl_algorithms/sac/agent.py | pretrain | medipixel/rl_algorithms | 466 | python | def pretrain(self):
pass | def pretrain(self):
pass<|docstring|>Pretraining steps.<|endoftext|> |
c9b7511e125c38bf1db22ab43bb200d3f35d06e3fd957d84eb3b5a36bc638b67 | def train(self):
'Train the agent.'
if self.is_log:
self.set_wandb()
self.pretrain()
for self.i_episode in range(1, (self.episode_num + 1)):
state = self.env.reset()
done = False
score = 0
self.episode_step = 0
loss_episode = list()
t_begin = time.time()
while (not done):
if (self.is_render and (self.i_episode >= self.render_after)):
self.env.render()
action = self.select_action(state)
(next_state, reward, done, _) = self.step(action)
self.total_step += 1
self.episode_step += 1
state = next_state
score += reward
if (len(self.memory) >= self.hyper_params.batch_size):
for _ in range(self.hyper_params.multiple_update):
experience = self.memory.sample()
experience = numpy2floattensor(experience, self.learner.device)
loss = self.learner.update_model(experience)
loss_episode.append(loss)
t_end = time.time()
avg_time_cost = ((t_end - t_begin) / self.episode_step)
if loss_episode:
avg_loss = np.vstack(loss_episode).mean(axis=0)
log_value = (self.i_episode, avg_loss, score, self.hyper_params.policy_update_freq, avg_time_cost)
self.write_log(log_value)
if ((self.i_episode % self.save_period) == 0):
self.learner.save_params(self.i_episode)
self.interim_test()
self.env.close()
self.learner.save_params(self.i_episode)
self.interim_test() | Train the agent. | rl_algorithms/sac/agent.py | train | medipixel/rl_algorithms | 466 | python | def train(self):
if self.is_log:
self.set_wandb()
self.pretrain()
for self.i_episode in range(1, (self.episode_num + 1)):
state = self.env.reset()
done = False
score = 0
self.episode_step = 0
loss_episode = list()
t_begin = time.time()
while (not done):
if (self.is_render and (self.i_episode >= self.render_after)):
self.env.render()
action = self.select_action(state)
(next_state, reward, done, _) = self.step(action)
self.total_step += 1
self.episode_step += 1
state = next_state
score += reward
if (len(self.memory) >= self.hyper_params.batch_size):
for _ in range(self.hyper_params.multiple_update):
experience = self.memory.sample()
experience = numpy2floattensor(experience, self.learner.device)
loss = self.learner.update_model(experience)
loss_episode.append(loss)
t_end = time.time()
avg_time_cost = ((t_end - t_begin) / self.episode_step)
if loss_episode:
avg_loss = np.vstack(loss_episode).mean(axis=0)
log_value = (self.i_episode, avg_loss, score, self.hyper_params.policy_update_freq, avg_time_cost)
self.write_log(log_value)
if ((self.i_episode % self.save_period) == 0):
self.learner.save_params(self.i_episode)
self.interim_test()
self.env.close()
self.learner.save_params(self.i_episode)
self.interim_test() | def train(self):
if self.is_log:
self.set_wandb()
self.pretrain()
for self.i_episode in range(1, (self.episode_num + 1)):
state = self.env.reset()
done = False
score = 0
self.episode_step = 0
loss_episode = list()
t_begin = time.time()
while (not done):
if (self.is_render and (self.i_episode >= self.render_after)):
self.env.render()
action = self.select_action(state)
(next_state, reward, done, _) = self.step(action)
self.total_step += 1
self.episode_step += 1
state = next_state
score += reward
if (len(self.memory) >= self.hyper_params.batch_size):
for _ in range(self.hyper_params.multiple_update):
experience = self.memory.sample()
experience = numpy2floattensor(experience, self.learner.device)
loss = self.learner.update_model(experience)
loss_episode.append(loss)
t_end = time.time()
avg_time_cost = ((t_end - t_begin) / self.episode_step)
if loss_episode:
avg_loss = np.vstack(loss_episode).mean(axis=0)
log_value = (self.i_episode, avg_loss, score, self.hyper_params.policy_update_freq, avg_time_cost)
self.write_log(log_value)
if ((self.i_episode % self.save_period) == 0):
self.learner.save_params(self.i_episode)
self.interim_test()
self.env.close()
self.learner.save_params(self.i_episode)
self.interim_test()<|docstring|>Train the agent.<|endoftext|> |
7f444e7dda666ab4bef5e94045a542baa396872201124ab2d245d1ffeacf26ec | def residual(self, q0, qend, dt, fSDC, feval, **kwargs):
'Return the residual of *fSDC*.'
f = np.empty(((self.nnodes,) + feval.shape), dtype=fSDC.dtype)
ff = f.reshape((self.nnodes, feval.size))
for m in range(self.nnodes):
f[m] = fSDC[(0, m)]
for p in range(1, fSDC.shape[0]):
f[m] += fSDC[(p, m)]
int_ff = (dt * np.dot(self.smat, ff))
int_f = int_ff.reshape((((self.nnodes - 1),) + feval.shape))
tot_f = np.zeros(feval.shape, dtype=fSDC.dtype)
for m in range((self.nnodes - 1)):
tot_f += int_f[(m, :)]
return ((q0 + tot_f) - qend) | Return the residual of *fSDC*. | pfasst/sdc.py | residual | memmett/PyPFASST | 6 | python | def residual(self, q0, qend, dt, fSDC, feval, **kwargs):
f = np.empty(((self.nnodes,) + feval.shape), dtype=fSDC.dtype)
ff = f.reshape((self.nnodes, feval.size))
for m in range(self.nnodes):
f[m] = fSDC[(0, m)]
for p in range(1, fSDC.shape[0]):
f[m] += fSDC[(p, m)]
int_ff = (dt * np.dot(self.smat, ff))
int_f = int_ff.reshape((((self.nnodes - 1),) + feval.shape))
tot_f = np.zeros(feval.shape, dtype=fSDC.dtype)
for m in range((self.nnodes - 1)):
tot_f += int_f[(m, :)]
return ((q0 + tot_f) - qend) | def residual(self, q0, qend, dt, fSDC, feval, **kwargs):
f = np.empty(((self.nnodes,) + feval.shape), dtype=fSDC.dtype)
ff = f.reshape((self.nnodes, feval.size))
for m in range(self.nnodes):
f[m] = fSDC[(0, m)]
for p in range(1, fSDC.shape[0]):
f[m] += fSDC[(p, m)]
int_ff = (dt * np.dot(self.smat, ff))
int_f = int_ff.reshape((((self.nnodes - 1),) + feval.shape))
tot_f = np.zeros(feval.shape, dtype=fSDC.dtype)
for m in range((self.nnodes - 1)):
tot_f += int_f[(m, :)]
return ((q0 + tot_f) - qend)<|docstring|>Return the residual of *fSDC*.<|endoftext|> |
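In words: the implicit and explicit pieces of fSDC are summed at each node, integrated node-to-node with the spectral quadrature matrix smat, and the residual is q0 plus dt times the total integral minus qend. A tiny standalone check on a scalar problem; the 3-node Lobatto integration matrix below is my own example, not taken from the package:

import numpy as np

# Node-to-node integration matrix for 3 Gauss-Lobatto nodes on [0, 1]
# (row m integrates the interpolating polynomial from node m to node m+1).
smat = np.array([[ 0.20833333,  0.33333333, -0.04166667],
                 [-0.04166667,  0.33333333,  0.20833333]])

dt, q0 = 1.0, 0.0
f_nodes = np.ones(3)                 # f(t, q) = 1, so the exact solution is q(t) = t
qend_exact = 1.0

residual = q0 + dt * smat.dot(f_nodes).sum() - qend_exact
print(residual)                      # ~0: the exact end value satisfies the collocation update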
62ee179388b21eae9d3133877ba356ff19baa259d36efaf97abaf16be9b8a8aa | def sweep(self, *args, **kwargs):
'Perform one SDC sweep.\n\n **This method should be overridden.**\n '
raise NotImplementedError() | Perform one SDC sweep.
**This method should be overridden.** | pfasst/sdc.py | sweep | memmett/PyPFASST | 6 | python | def sweep(self, *args, **kwargs):
'Perform one SDC sweep.\n\n **This method should be overridden.**\n '
raise NotImplementedError() | def sweep(self, *args, **kwargs):
'Perform one SDC sweep.\n\n **This method should be overridden.**\n '
raise NotImplementedError()<|docstring|>Perform one SDC sweep.
**This method should be overridden.**<|endoftext|> |
2c12485e6f7ff5dcb5383738ed083811b533c6cc073b9145bd4a9aa7c4f391f9 | def evaluate(self, *args, **kwargs):
'Evaluate.\n\n **This method should be overridden.**\n '
raise NotImplementedError() | Evaluate.
**This method should be overridden.** | pfasst/sdc.py | evaluate | memmett/PyPFASST | 6 | python | def evaluate(self, *args, **kwargs):
'Evaluate.\n\n **This method should be overridden.**\n '
raise NotImplementedError() | def evaluate(self, *args, **kwargs):
'Evaluate.\n\n **This method should be overridden.**\n '
raise NotImplementedError()<|docstring|>Evaluate.
**This method should be overridden.**<|endoftext|> |
4e0500c796638303615cb161a4a4421f2bfab391c91f4eaec3f5128942ff5f0a | def MakePupil(D_eval, side_len, N):
'\n Create a pupil at the receiver plane to evaluate the structure function that accounts for receiver sampling effects\n\n :param D_eval: the diameter of the pupil in meters\n :param side_len: the sidelength of the receiver plane in meters\n :param N: the number of discrete intervals at the receiver\n :return: a pupil function that can be used to mask the receiver plane to the desirec aperture\n '
boundary1 = (- (side_len / 2))
boundary2 = (side_len / 2)
A = np.linspace(boundary1, boundary2, N)
A = np.array(([A] * N))
base = np.linspace(boundary1, boundary2, N)
set_ones = np.ones(N)
B = np.array(([set_ones] * N))
for i in range(0, len(base)):
B[i] = (B[i] * base[i])
A = A.reshape(N, N)
B = B.reshape(N, N)
x_coord = (A ** 2)
y_coord = (B ** 2)
rad_dist = np.sqrt((x_coord + y_coord))
mask = []
for row in rad_dist:
for val in row:
if (val < D_eval):
mask.append(1.0)
elif (val > D_eval):
mask.append(0.0)
elif (val == D_eval):
mask.append(0.5)
mask = np.array([mask])
mask = mask.reshape(N, N)
return mask | Create a pupil at the receiver plane to evaluate the structure function that accounts for receiver sampling effects
:param D_eval: the diameter of the pupil in meters
:param side_len: the sidelength of the receiver plane in meters
:param N: the number of discrete intervals at the receiver
:return: a pupil function that can be used to mask the receiver plane to the desired aperture | sim_evaluation_module.py | MakePupil | gregbad/WavePy | 0 | python | def MakePupil(D_eval, side_len, N):
'\n Create a pupil at the receiver plane to evaluate the structure function that accounts for receiver sampling effects\n\n :param D_eval: the diameter of the pupil in meters\n :param side_len: the sidelength of the receiver plane in meters\n :param N: the number of discrete intervals at the receiver\n :return: a pupil function that can be used to mask the receiver plane to the desirec aperture\n '
boundary1 = (- (side_len / 2))
boundary2 = (side_len / 2)
A = np.linspace(boundary1, boundary2, N)
A = np.array(([A] * N))
base = np.linspace(boundary1, boundary2, N)
set_ones = np.ones(N)
B = np.array(([set_ones] * N))
for i in range(0, len(base)):
B[i] = (B[i] * base[i])
A = A.reshape(N, N)
B = B.reshape(N, N)
x_coord = (A ** 2)
y_coord = (B ** 2)
rad_dist = np.sqrt((x_coord + y_coord))
mask = []
for row in rad_dist:
for val in row:
if (val < D_eval):
mask.append(1.0)
elif (val > D_eval):
mask.append(0.0)
elif (val == D_eval):
mask.append(0.5)
mask = np.array([mask])
mask = mask.reshape(N, N)
return mask | def MakePupil(D_eval, side_len, N):
'\n Create a pupil at the receiver plane to evaluate the structure function that accounts for receiver sampling effects\n\n :param D_eval: the diameter of the pupil in meters\n :param side_len: the sidelength of the receiver plane in meters\n :param N: the number of discrete intervals at the receiver\n :return: a pupil function that can be used to mask the receiver plane to the desirec aperture\n '
boundary1 = (- (side_len / 2))
boundary2 = (side_len / 2)
A = np.linspace(boundary1, boundary2, N)
A = np.array(([A] * N))
base = np.linspace(boundary1, boundary2, N)
set_ones = np.ones(N)
B = np.array(([set_ones] * N))
for i in range(0, len(base)):
B[i] = (B[i] * base[i])
A = A.reshape(N, N)
B = B.reshape(N, N)
x_coord = (A ** 2)
y_coord = (B ** 2)
rad_dist = np.sqrt((x_coord + y_coord))
mask = []
for row in rad_dist:
for val in row:
if (val < D_eval):
mask.append(1.0)
elif (val > D_eval):
mask.append(0.0)
elif (val == D_eval):
mask.append(0.5)
mask = np.array([mask])
mask = mask.reshape(N, N)
return mask<|docstring|>Create a pupil at the receiver plane to evaluate the structure function that accounts for receiver sampling effects
:param D_eval: the diameter of the pupil in meters
:param side_len: the sidelength of the receiver plane in meters
:param N: the number of discrete intervals at the receiver
:return: a pupil function that can be used to mask the receiver plane to the desired aperture<|endoftext|>
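A quick hedged usage sketch, assuming MakePupil as defined above is in scope and picking arbitrary grid numbers. Note that the function compares the radial distance against D_eval directly, so D_eval effectively acts as a radial cutoff, which the area check below makes visible:

import numpy as np

N, side_len = 256, 0.5                  # samples and physical side length [m], chosen for illustration
D = 0.05                                # value passed as D_eval [m]

mask = MakePupil(D, side_len, N)        # assumes the function above is importable

dx = side_len / N
fill = mask.sum() * dx**2               # approximate area covered by the pupil [m^2]
print(fill, np.pi * D**2)               # compare against pi*r^2 with r = D_eval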
2f7033f5322177e0d9ed63edefb680a31f9231cd6daf9d425ea7ac7eb3032bed | def structure_function_over_time(report_filename, sim_result_directory, D_receiver_pupil=None):
'\n Evaluate the accuracy of the turbulence simulation by computing the structure function at the receiver plane using\n a mutual coherence function approach. Note: This will only be accurate over many different simulations and some\n disagreement should be expected over a single turbulence simulation\n\n :param report_filename: the filename including path for the output report of the turbulence simulation\n :param sim_result_directory: the directory in which the turbulence output simulation files over the timesteps have\n been written out\n :param D_receiver_pupil: the receiver pupil diametre in meters for cropping purposes\n :return:\n '
with open(report_filename, 'r') as f:
sim_dict = yaml.load(f)
tsteps = sim_dict['Timesteps']
Rdx = sim_dict['Receiver Rdx']
N = sim_dict['N']
r0 = sim_dict['r0']
L0 = sim_dict['L0']
l0 = sim_dict['l0']
delta_f = (1 / (N * Rdx))
side_len = (N * Rdx)
if (D_receiver_pupil is None):
D_receiver_pupil = (r0 * 5.0)
aperture_mask = MakePupil(D_receiver_pupil, side_len, N)
(fig, ax) = plt.subplots(figsize=(9, 6))
color = cm.jet(np.linspace(0, 1, tsteps))
mesh_spacing = np.arange(0, N)
(X, Y) = np.meshgrid(mesh_spacing, mesh_spacing)
r = np.hypot((X - (N / 2)), (Y - (N / 2)))
r[(int((N / 2)), int((N / 2)))] = 0
rbin = ((N * r) / r.max()).astype(np.int)
bin_sampling = (((r.max() * Rdx) * np.arange(1, (rbin.max() + 1))) / rbin.max())
bin_sampling_cutoff = 0
where_within_ap = 0
mcf_sum = 0
D_sim_array = []
for t in range(0, tsteps):
fname_turbsim_t = (((sim_result_directory + '_turbsim_t') + '{:04d}'.format(t)) + '.npy')
turbsim_t = np.load(fname_turbsim_t)
U_r = (np.fft.fftshift(np.fft.fft2(np.fft.fftshift((turbsim_t * aperture_mask)))) * (Rdx ** 2.0))
U_r_autocorr = (np.fft.ifftshift(np.fft.ifft2(np.fft.ifftshift((np.conj(U_r) * U_r)))) * ((delta_f * N) ** 2.0))
maskcorr = (np.fft.ifftshift(np.fft.ifft2(np.fft.ifftshift((abs(np.fft.ifftshift(np.fft.fft2(np.fft.ifftshift(aperture_mask)))) * (Rdx ** 2.0))))) * ((N * delta_f) ** 2.0))
plt.imshow(np.abs(U_r_autocorr))
plt.show()
plt.imshow(np.abs(maskcorr))
plt.show()
c = ((U_r_autocorr / maskcorr) * aperture_mask)
D_r_t = ((- 2) * np.log((np.abs(c) / np.abs(c[(int((N / 2)), int((N / 2)))]))))
mcf_sum = (mcf_sum + c)
D_1d = scipy.ndimage.mean(D_r_t, labels=rbin, index=np.arange(1, (rbin.max() + 1)))
where_within_ap = (bin_sampling <= D_receiver_pupil)
bin_sampling_cutoff = bin_sampling[where_within_ap]
D_1d_cutoff = D_1d[where_within_ap]
D_sim_array.append(D_1d_cutoff)
MCF_sim = (np.abs(mcf_sum) / np.abs(mcf_sum[(int((N / 2)), int((N / 2)))]))
D_sim_avg = ((- 2) * np.log((np.abs(MCF_sim) / np.abs(MCF_sim[(int((N / 2)), int((N / 2)))]))))
D_sim_1d = scipy.ndimage.mean(D_sim_avg, labels=rbin, index=np.arange(1, (rbin.max() + 1)))
D_sim_1d_cutoff = D_sim_1d[where_within_ap]
ax.plot((bin_sampling_cutoff / r0), D_sim_1d_cutoff, 'red', label='Sim Average MCF')
D_sim_array = np.asarray(D_sim_array)
average_D = np.average(D_sim_array, axis=0)
max_D = np.max(D_sim_array, axis=0)
min_D = np.min(D_sim_array, axis=0)
ax.plot((bin_sampling_cutoff / r0), min_D, 'g--', label='Sample Lower Bound')
ax.plot((bin_sampling_cutoff / r0), max_D, 'k--', label='Sample Upper Bound')
ax.plot((bin_sampling_cutoff / r0), average_D, 'b', label='Sample MCF Average')
D_kolmog = (6.88 * ((bin_sampling_cutoff / r0) ** (5.0 / 3.0)))
k0 = ((2 * np.pi) / L0)
D_mVK = ((((7.75 * (r0 ** ((- 5.0) / 3.0))) * (l0 ** ((- 1.0) / 3.0))) * (bin_sampling_cutoff ** 2.0)) * ((1 / ((1 + ((2.03 * (bin_sampling_cutoff ** 2.0)) / (l0 ** 2.0))) ** (1.0 / 6.0))) - (0.72 * ((k0 * l0) ** (1.0 / 3.0)))))
ax.plot((bin_sampling_cutoff / r0), D_mVK, 'orange', label='MvK (theoretical)')
ax.legend()
ax.set_xlabel('$\\Delta r$/$r_0$')
ax.set_ylabel('$D_\\Phi$(|$\\Delta r$|) [rad$^2$]')
plt.show() | Evaluate the accuracy of the turbulence simulation by computing the structure function at the receiver plane using
a mutual coherence function approach. Note: This will only be accurate over many different simulations and some
disagreement should be expected over a single turbulence simulation
:param report_filename: the filename including path for the output report of the turbulence simulation
:param sim_result_directory: the directory in which the turbulence output simulation files over the timesteps have
been written out
:param D_receiver_pupil: the receiver pupil diameter in meters for cropping purposes
:return: | sim_evaluation_module.py | structure_function_over_time | gregbad/WavePy | 0 | python | def structure_function_over_time(report_filename, sim_result_directory, D_receiver_pupil=None):
'\n Evaluate the accuracy of the turbulence simulation by computing the structure function at the receiver plane using\n a mutual coherence function approach. Note: This will only be accurate over many different simulations and some\n disagreement should be expected over a single turbulence simulation\n\n :param report_filename: the filename including path for the output report of the turbulence simulation\n :param sim_result_directory: the directory in which the turbulence output simulation files over the timesteps have\n been written out\n :param D_receiver_pupil: the receiver pupil diametre in meters for cropping purposes\n :return:\n '
with open(report_filename, 'r') as f:
sim_dict = yaml.load(f)
tsteps = sim_dict['Timesteps']
Rdx = sim_dict['Receiver Rdx']
N = sim_dict['N']
r0 = sim_dict['r0']
L0 = sim_dict['L0']
l0 = sim_dict['l0']
delta_f = (1 / (N * Rdx))
side_len = (N * Rdx)
if (D_receiver_pupil is None):
D_receiver_pupil = (r0 * 5.0)
aperture_mask = MakePupil(D_receiver_pupil, side_len, N)
(fig, ax) = plt.subplots(figsize=(9, 6))
color = cm.jet(np.linspace(0, 1, tsteps))
mesh_spacing = np.arange(0, N)
(X, Y) = np.meshgrid(mesh_spacing, mesh_spacing)
r = np.hypot((X - (N / 2)), (Y - (N / 2)))
r[(int((N / 2)), int((N / 2)))] = 0
rbin = ((N * r) / r.max()).astype(np.int)
bin_sampling = (((r.max() * Rdx) * np.arange(1, (rbin.max() + 1))) / rbin.max())
bin_sampling_cutoff = 0
where_within_ap = 0
mcf_sum = 0
D_sim_array = []
for t in range(0, tsteps):
fname_turbsim_t = (((sim_result_directory + '_turbsim_t') + '{:04d}'.format(t)) + '.npy')
turbsim_t = np.load(fname_turbsim_t)
U_r = (np.fft.fftshift(np.fft.fft2(np.fft.fftshift((turbsim_t * aperture_mask)))) * (Rdx ** 2.0))
U_r_autocorr = (np.fft.ifftshift(np.fft.ifft2(np.fft.ifftshift((np.conj(U_r) * U_r)))) * ((delta_f * N) ** 2.0))
maskcorr = (np.fft.ifftshift(np.fft.ifft2(np.fft.ifftshift((abs(np.fft.ifftshift(np.fft.fft2(np.fft.ifftshift(aperture_mask)))) * (Rdx ** 2.0))))) * ((N * delta_f) ** 2.0))
plt.imshow(np.abs(U_r_autocorr))
plt.show()
plt.imshow(np.abs(maskcorr))
plt.show()
c = ((U_r_autocorr / maskcorr) * aperture_mask)
D_r_t = ((- 2) * np.log((np.abs(c) / np.abs(c[(int((N / 2)), int((N / 2)))]))))
mcf_sum = (mcf_sum + c)
D_1d = scipy.ndimage.mean(D_r_t, labels=rbin, index=np.arange(1, (rbin.max() + 1)))
where_within_ap = (bin_sampling <= D_receiver_pupil)
bin_sampling_cutoff = bin_sampling[where_within_ap]
D_1d_cutoff = D_1d[where_within_ap]
D_sim_array.append(D_1d_cutoff)
MCF_sim = (np.abs(mcf_sum) / np.abs(mcf_sum[(int((N / 2)), int((N / 2)))]))
D_sim_avg = ((- 2) * np.log((np.abs(MCF_sim) / np.abs(MCF_sim[(int((N / 2)), int((N / 2)))]))))
D_sim_1d = scipy.ndimage.mean(D_sim_avg, labels=rbin, index=np.arange(1, (rbin.max() + 1)))
D_sim_1d_cutoff = D_sim_1d[where_within_ap]
ax.plot((bin_sampling_cutoff / r0), D_sim_1d_cutoff, 'red', label='Sim Average MCF')
D_sim_array = np.asarray(D_sim_array)
average_D = np.average(D_sim_array, axis=0)
max_D = np.max(D_sim_array, axis=0)
min_D = np.min(D_sim_array, axis=0)
ax.plot((bin_sampling_cutoff / r0), min_D, 'g--', label='Sample Lower Bound')
ax.plot((bin_sampling_cutoff / r0), max_D, 'k--', label='Sample Upper Bound')
ax.plot((bin_sampling_cutoff / r0), average_D, 'b', label='Sample MCF Average')
D_kolmog = (6.88 * ((bin_sampling_cutoff / r0) ** (5.0 / 3.0)))
k0 = ((2 * np.pi) / L0)
D_mVK = ((((7.75 * (r0 ** ((- 5.0) / 3.0))) * (l0 ** ((- 1.0) / 3.0))) * (bin_sampling_cutoff ** 2.0)) * ((1 / ((1 + ((2.03 * (bin_sampling_cutoff ** 2.0)) / (l0 ** 2.0))) ** (1.0 / 6.0))) - (0.72 * ((k0 * l0) ** (1.0 / 3.0)))))
ax.plot((bin_sampling_cutoff / r0), D_mVK, 'orange', label='MvK (theoretical)')
ax.legend()
ax.set_xlabel('$\\Delta r$/$r_0$')
ax.set_ylabel('$D_\\Phi$(|$\\Delta r$|) [rad$^2$]')
plt.show() | def structure_function_over_time(report_filename, sim_result_directory, D_receiver_pupil=None):
'\n Evaluate the accuracy of the turbulence simulation by computing the structure function at the receiver plane using\n a mutual coherence function approach. Note: This will only be accurate over many different simulations and some\n disagreement should be expected over a single turbulence simulation\n\n :param report_filename: the filename including path for the output report of the turbulence simulation\n :param sim_result_directory: the directory in which the turbulence output simulation files over the timesteps have\n been written out\n :param D_receiver_pupil: the receiver pupil diametre in meters for cropping purposes\n :return:\n '
with open(report_filename, 'r') as f:
sim_dict = yaml.load(f)
tsteps = sim_dict['Timesteps']
Rdx = sim_dict['Receiver Rdx']
N = sim_dict['N']
r0 = sim_dict['r0']
L0 = sim_dict['L0']
l0 = sim_dict['l0']
delta_f = (1 / (N * Rdx))
side_len = (N * Rdx)
if (D_receiver_pupil is None):
D_receiver_pupil = (r0 * 5.0)
aperture_mask = MakePupil(D_receiver_pupil, side_len, N)
(fig, ax) = plt.subplots(figsize=(9, 6))
color = cm.jet(np.linspace(0, 1, tsteps))
mesh_spacing = np.arange(0, N)
(X, Y) = np.meshgrid(mesh_spacing, mesh_spacing)
r = np.hypot((X - (N / 2)), (Y - (N / 2)))
r[(int((N / 2)), int((N / 2)))] = 0
rbin = ((N * r) / r.max()).astype(np.int)
bin_sampling = (((r.max() * Rdx) * np.arange(1, (rbin.max() + 1))) / rbin.max())
bin_sampling_cutoff = 0
where_within_ap = 0
mcf_sum = 0
D_sim_array = []
for t in range(0, tsteps):
fname_turbsim_t = (((sim_result_directory + '_turbsim_t') + '{:04d}'.format(t)) + '.npy')
turbsim_t = np.load(fname_turbsim_t)
U_r = (np.fft.fftshift(np.fft.fft2(np.fft.fftshift((turbsim_t * aperture_mask)))) * (Rdx ** 2.0))
U_r_autocorr = (np.fft.ifftshift(np.fft.ifft2(np.fft.ifftshift((np.conj(U_r) * U_r)))) * ((delta_f * N) ** 2.0))
maskcorr = (np.fft.ifftshift(np.fft.ifft2(np.fft.ifftshift((abs(np.fft.ifftshift(np.fft.fft2(np.fft.ifftshift(aperture_mask)))) * (Rdx ** 2.0))))) * ((N * delta_f) ** 2.0))
plt.imshow(np.abs(U_r_autocorr))
plt.show()
plt.imshow(np.abs(maskcorr))
plt.show()
c = ((U_r_autocorr / maskcorr) * aperture_mask)
D_r_t = ((- 2) * np.log((np.abs(c) / np.abs(c[(int((N / 2)), int((N / 2)))]))))
mcf_sum = (mcf_sum + c)
D_1d = scipy.ndimage.mean(D_r_t, labels=rbin, index=np.arange(1, (rbin.max() + 1)))
where_within_ap = (bin_sampling <= D_receiver_pupil)
bin_sampling_cutoff = bin_sampling[where_within_ap]
D_1d_cutoff = D_1d[where_within_ap]
D_sim_array.append(D_1d_cutoff)
MCF_sim = (np.abs(mcf_sum) / np.abs(mcf_sum[(int((N / 2)), int((N / 2)))]))
D_sim_avg = ((- 2) * np.log((np.abs(MCF_sim) / np.abs(MCF_sim[(int((N / 2)), int((N / 2)))]))))
D_sim_1d = scipy.ndimage.mean(D_sim_avg, labels=rbin, index=np.arange(1, (rbin.max() + 1)))
D_sim_1d_cutoff = D_sim_1d[where_within_ap]
ax.plot((bin_sampling_cutoff / r0), D_sim_1d_cutoff, 'red', label='Sim Average MCF')
D_sim_array = np.asarray(D_sim_array)
average_D = np.average(D_sim_array, axis=0)
max_D = np.max(D_sim_array, axis=0)
min_D = np.min(D_sim_array, axis=0)
ax.plot((bin_sampling_cutoff / r0), min_D, 'g--', label='Sample Lower Bound')
ax.plot((bin_sampling_cutoff / r0), max_D, 'k--', label='Sample Upper Bound')
ax.plot((bin_sampling_cutoff / r0), average_D, 'b', label='Sample MCF Average')
D_kolmog = (6.88 * ((bin_sampling_cutoff / r0) ** (5.0 / 3.0)))
k0 = ((2 * np.pi) / L0)
D_mVK = ((((7.75 * (r0 ** ((- 5.0) / 3.0))) * (l0 ** ((- 1.0) / 3.0))) * (bin_sampling_cutoff ** 2.0)) * ((1 / ((1 + ((2.03 * (bin_sampling_cutoff ** 2.0)) / (l0 ** 2.0))) ** (1.0 / 6.0))) - (0.72 * ((k0 * l0) ** (1.0 / 3.0)))))
ax.plot((bin_sampling_cutoff / r0), D_mVK, 'orange', label='MvK (theoretical)')
ax.legend()
ax.set_xlabel('$\\Delta r$/$r_0$')
ax.set_ylabel('$D_\\Phi$(|$\\Delta r$|) [rad$^2$]')
plt.show()<|docstring|>Evaluate the accuracy of the turbulence simulation by computing the structure function at the receiver plane using
a mutual coherence function approach. Note: This will only be accurate over many different simulations and some
disagreement should be expected over a single turbulence simulation
:param report_filename: the filename including path for the output report of the turbulence simulation
:param sim_result_directory: the directory in which the turbulence output simulation files over the timesteps have
been written out
:param D_receiver_pupil: the receiver pupil diameter in meters for cropping purposes
:return:<|endoftext|> |
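The two reference curves plotted at the end are the Kolmogorov phase structure function 6.88*(r/r0)**(5/3) and a modified von Karman fit with inner scale l0 and outer-scale wavenumber k0 = 2*pi/L0. The same expressions, pulled out as a small standalone helper (parameter values below are arbitrary examples):

import numpy as np

def D_phi_kolmogorov(r, r0):
    return 6.88 * (r / r0) ** (5.0 / 3.0)

def D_phi_mvk(r, r0, L0, l0):
    k0 = 2.0 * np.pi / L0
    return (7.75 * r0 ** (-5.0 / 3.0) * l0 ** (-1.0 / 3.0) * r ** 2.0
            * (1.0 / (1.0 + 2.03 * r ** 2.0 / l0 ** 2.0) ** (1.0 / 6.0)
               - 0.72 * (k0 * l0) ** (1.0 / 3.0)))

r = np.linspace(1e-3, 0.5, 100)
print(D_phi_kolmogorov(r, r0=0.05)[:3], D_phi_mvk(r, r0=0.05, L0=1000.0, l0=0.001)[:3])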
78b19657c31d8fcff1490169bc2ab19f6aefdf448d314558e705f1ceea623b30 | def evaluate_phase_structure_function_accuracy(input_N, input_dx, input_cn2, num_screen_draws=20, input_propdist=3000.0, input_wave=1e-06, input_num_subharmonics=5, input_L0=1000.0, input_n_screen_sim=4):
'\n Generate many different phase screens for a simulation setup and compare the statistical structure funciton of the\n phase screen to theoretical fits for a\n\n :param input_N: N for the simulation\n :param input_dx: sampling at the source plane for the simulation\n :param input_cn2: the turbulence of the simulation\n :param num_screen_draws: the number of random screens draws for computing statistics\n :param input_propdist: the propagation distance of the simulation\n :param input_wave: the wavelength in meters for sim\n :param input_num_subharmonics: the number of subharmonics for low frequency screen draws\n :param input_L0: the outer scale of the simulation\n :param input_n_screen_sim: the number of screens used in the simulation for propagating a beam\n :return:\n '
sim = wp.wavepy(N=input_N, L0=input_L0, dx=input_dx, Rdx=input_dx, Cn2=input_cn2, PropDist=input_propdist, NumScr=input_n_screen_sim, W0=5, SideLen=(input_N * input_dx), wvl=input_wave)
r0_scrn_sim = sim.r0scrn
sum_Dr = 0
for nscr in range(0, num_screen_draws):
phz_hi = sim.PhaseScreen(input_dx)
phz_lo = sim.SubHarmonicComp(input_num_subharmonics, input_dx)
(fig, ax) = plt.subplots(1, 3)
ax[0].imshow(phz_hi)
ax[1].imshow(phz_lo)
ax[2].imshow((phz_lo + phz_hi))
plt.show()
phz = (phz_lo + phz_hi)
D_bos = sim.StructFunc(phz)
sum_Dr = (sum_Dr + D_bos)
D_r_avg = (sum_Dr / num_screen_draws)
x = np.arange((- int((input_N / 2))), int((input_N / 2)))
(X, Y) = np.meshgrid(x, x)
R = np.sqrt((((X * input_dx) ** 2) + ((Y * input_dx) ** 2)))
R[(int((input_N / 2)), int((input_N / 2)))] = 0
rbin = ((input_N * R) / R.max()).astype(np.int)
bin_sampling = ((R.max() * np.arange(0, (rbin.max() + 1))) / rbin.max())
cutoff_aperture = (((input_N * input_dx) / 4) / 2)
where_within_aperture = (bin_sampling < cutoff_aperture)
bin_sampling_aperture = bin_sampling[(bin_sampling < cutoff_aperture)]
D_avg_1d = scipy.ndimage.mean(D_r_avg, labels=rbin, index=np.arange(0, (rbin.max() + 1)))
D_avg_1d_cutoff = D_avg_1d[where_within_aperture]
k0 = ((2 * np.pi) / input_L0)
l0 = 0.001
D_mVK_2d = ((((7.75 * (r0_scrn_sim ** ((- 5.0) / 3.0))) * (l0 ** ((- 1.0) / 3.0))) * (R ** 2.0)) * ((1 / ((1 + ((2.03 * (R ** 2.0)) / (l0 ** 2.0))) ** (1.0 / 6.0))) - (0.72 * ((k0 * l0) ** (1.0 / 3.0)))))
D_Kolm_2d = (6.88 * ((R / r0_scrn_sim) ** (5 / 3)))
D_mVK_1d = ((((7.75 * (r0_scrn_sim ** ((- 5.0) / 3.0))) * (l0 ** ((- 1.0) / 3.0))) * (bin_sampling_aperture ** 2.0)) * ((1 / ((1 + ((2.03 * (bin_sampling_aperture ** 2.0)) / (l0 ** 2.0))) ** (1.0 / 6.0))) - (0.72 * ((k0 * l0) ** (1.0 / 3.0)))))
D_Kolm_1d = (6.88 * ((bin_sampling_aperture / r0_scrn_sim) ** (5 / 3)))
plt.plot(bin_sampling_aperture, D_avg_1d_cutoff, 'r', label='Simulation')
plt.plot(bin_sampling_aperture, D_mVK_1d, 'k--', label='MvK')
plt.plot(bin_sampling_aperture, D_Kolm_1d, 'b--', label='Kolmogorov')
plt.legend()
plt.xlabel('$\\Delta r$ [meters]')
plt.ylabel('$D_\\Phi$(|$\\Delta r$|) [rad$^2$]')
plt.show() | Generate many different phase screens for a simulation setup and compare the statistical structure function of the
phase screen to theoretical fits for a
:param input_N: N for the simulation
:param input_dx: sampling at the source plane for the simulation
:param input_cn2: the turbulence of the simulation
:param num_screen_draws: the number of random screens draws for computing statistics
:param input_propdist: the propagation distance of the simulation
:param input_wave: the wavelength in meters for sim
:param input_num_subharmonics: the number of subharmonics for low frequency screen draws
:param input_L0: the outer scale of the simulation
:param input_n_screen_sim: the number of screens used in the simulation for propagating a beam
:return: | sim_evaluation_module.py | evaluate_phase_structure_function_accuracy | gregbad/WavePy | 0 | python | def evaluate_phase_structure_function_accuracy(input_N, input_dx, input_cn2, num_screen_draws=20, input_propdist=3000.0, input_wave=1e-06, input_num_subharmonics=5, input_L0=1000.0, input_n_screen_sim=4):
'\n    Generate many different phase screens for a simulation setup and compare the statistical structure function of the\n    phase screen to theoretical fits\n\n    :param input_N: N for the simulation\n    :param input_dx: sampling at the source plane for the simulation\n    :param input_cn2: the turbulence of the simulation\n    :param num_screen_draws: the number of random screen draws for computing statistics\n    :param input_propdist: the propagation distance of the simulation\n    :param input_wave: the wavelength in meters for sim\n    :param input_num_subharmonics: the number of subharmonics for low frequency screen draws\n    :param input_L0: the outer scale of the simulation\n    :param input_n_screen_sim: the number of screens used in the simulation for propagating a beam\n    :return:\n    '
sim = wp.wavepy(N=input_N, L0=input_L0, dx=input_dx, Rdx=input_dx, Cn2=input_cn2, PropDist=input_propdist, NumScr=input_n_screen_sim, W0=5, SideLen=(input_N * input_dx), wvl=input_wave)
r0_scrn_sim = sim.r0scrn
sum_Dr = 0
for nscr in range(0, num_screen_draws):
phz_hi = sim.PhaseScreen(input_dx)
phz_lo = sim.SubHarmonicComp(input_num_subharmonics, input_dx)
(fig, ax) = plt.subplots(1, 3)
ax[0].imshow(phz_hi)
ax[1].imshow(phz_lo)
ax[2].imshow((phz_lo + phz_hi))
plt.show()
phz = (phz_lo + phz_hi)
D_bos = sim.StructFunc(phz)
sum_Dr = (sum_Dr + D_bos)
D_r_avg = (sum_Dr / num_screen_draws)
x = np.arange((- int((input_N / 2))), int((input_N / 2)))
(X, Y) = np.meshgrid(x, x)
R = np.sqrt((((X * input_dx) ** 2) + ((Y * input_dx) ** 2)))
R[(int((input_N / 2)), int((input_N / 2)))] = 0
rbin = ((input_N * R) / R.max()).astype(int)
bin_sampling = ((R.max() * np.arange(0, (rbin.max() + 1))) / rbin.max())
cutoff_aperture = (((input_N * input_dx) / 4) / 2)
where_within_aperture = (bin_sampling < cutoff_aperture)
bin_sampling_aperture = bin_sampling[(bin_sampling < cutoff_aperture)]
D_avg_1d = scipy.ndimage.mean(D_r_avg, labels=rbin, index=np.arange(0, (rbin.max() + 1)))
D_avg_1d_cutoff = D_avg_1d[where_within_aperture]
k0 = ((2 * np.pi) / input_L0)
l0 = 0.001
D_mVK_2d = ((((7.75 * (r0_scrn_sim ** ((- 5.0) / 3.0))) * (l0 ** ((- 1.0) / 3.0))) * (R ** 2.0)) * ((1 / ((1 + ((2.03 * (R ** 2.0)) / (l0 ** 2.0))) ** (1.0 / 6.0))) - (0.72 * ((k0 * l0) ** (1.0 / 3.0)))))
D_Kolm_2d = (6.88 * ((R / r0_scrn_sim) ** (5 / 3)))
D_mVK_1d = ((((7.75 * (r0_scrn_sim ** ((- 5.0) / 3.0))) * (l0 ** ((- 1.0) / 3.0))) * (bin_sampling_aperture ** 2.0)) * ((1 / ((1 + ((2.03 * (bin_sampling_aperture ** 2.0)) / (l0 ** 2.0))) ** (1.0 / 6.0))) - (0.72 * ((k0 * l0) ** (1.0 / 3.0)))))
D_Kolm_1d = (6.88 * ((bin_sampling_aperture / r0_scrn_sim) ** (5 / 3)))
plt.plot(bin_sampling_aperture, D_avg_1d_cutoff, 'r', label='Simulation')
plt.plot(bin_sampling_aperture, D_mVK_1d, 'k--', label='MvK')
plt.plot(bin_sampling_aperture, D_Kolm_1d, 'b--', label='Kolmogorov')
plt.legend()
plt.xlabel('$\\Delta r$ [meters]')
plt.ylabel('$D_\\Phi$(|$\\Delta r$|) [rad$^2$]')
plt.show() | def evaluate_phase_structure_function_accuracy(input_N, input_dx, input_cn2, num_screen_draws=20, input_propdist=3000.0, input_wave=1e-06, input_num_subharmonics=5, input_L0=1000.0, input_n_screen_sim=4):
'\n    Generate many different phase screens for a simulation setup and compare the statistical structure function of the\n    phase screen to theoretical fits\n\n    :param input_N: N for the simulation\n    :param input_dx: sampling at the source plane for the simulation\n    :param input_cn2: the turbulence of the simulation\n    :param num_screen_draws: the number of random screen draws for computing statistics\n    :param input_propdist: the propagation distance of the simulation\n    :param input_wave: the wavelength in meters for sim\n    :param input_num_subharmonics: the number of subharmonics for low frequency screen draws\n    :param input_L0: the outer scale of the simulation\n    :param input_n_screen_sim: the number of screens used in the simulation for propagating a beam\n    :return:\n    '
sim = wp.wavepy(N=input_N, L0=input_L0, dx=input_dx, Rdx=input_dx, Cn2=input_cn2, PropDist=input_propdist, NumScr=input_n_screen_sim, W0=5, SideLen=(input_N * input_dx), wvl=input_wave)
r0_scrn_sim = sim.r0scrn
sum_Dr = 0
for nscr in range(0, num_screen_draws):
phz_hi = sim.PhaseScreen(input_dx)
phz_lo = sim.SubHarmonicComp(input_num_subharmonics, input_dx)
(fig, ax) = plt.subplots(1, 3)
ax[0].imshow(phz_hi)
ax[1].imshow(phz_lo)
ax[2].imshow((phz_lo + phz_hi))
plt.show()
phz = (phz_lo + phz_hi)
D_bos = sim.StructFunc(phz)
sum_Dr = (sum_Dr + D_bos)
D_r_avg = (sum_Dr / num_screen_draws)
x = np.arange((- int((input_N / 2))), int((input_N / 2)))
(X, Y) = np.meshgrid(x, x)
R = np.sqrt((((X * input_dx) ** 2) + ((Y * input_dx) ** 2)))
R[(int((input_N / 2)), int((input_N / 2)))] = 0
rbin = ((input_N * R) / R.max()).astype(int)
bin_sampling = ((R.max() * np.arange(0, (rbin.max() + 1))) / rbin.max())
cutoff_aperture = (((input_N * input_dx) / 4) / 2)
where_within_aperture = (bin_sampling < cutoff_aperture)
bin_sampling_aperture = bin_sampling[(bin_sampling < cutoff_aperture)]
D_avg_1d = scipy.ndimage.mean(D_r_avg, labels=rbin, index=np.arange(0, (rbin.max() + 1)))
D_avg_1d_cutoff = D_avg_1d[where_within_aperture]
k0 = ((2 * np.pi) / input_L0)
l0 = 0.001
D_mVK_2d = ((((7.75 * (r0_scrn_sim ** ((- 5.0) / 3.0))) * (l0 ** ((- 1.0) / 3.0))) * (R ** 2.0)) * ((1 / ((1 + ((2.03 * (R ** 2.0)) / (l0 ** 2.0))) ** (1.0 / 6.0))) - (0.72 * ((k0 * l0) ** (1.0 / 3.0)))))
D_Kolm_2d = (6.88 * ((R / r0_scrn_sim) ** (5 / 3)))
D_mVK_1d = ((((7.75 * (r0_scrn_sim ** ((- 5.0) / 3.0))) * (l0 ** ((- 1.0) / 3.0))) * (bin_sampling_aperture ** 2.0)) * ((1 / ((1 + ((2.03 * (bin_sampling_aperture ** 2.0)) / (l0 ** 2.0))) ** (1.0 / 6.0))) - (0.72 * ((k0 * l0) ** (1.0 / 3.0)))))
D_Kolm_1d = (6.88 * ((bin_sampling_aperture / r0_scrn_sim) ** (5 / 3)))
plt.plot(bin_sampling_aperture, D_avg_1d_cutoff, 'r', label='Simulation')
plt.plot(bin_sampling_aperture, D_mVK_1d, 'k--', label='MvK')
plt.plot(bin_sampling_aperture, D_Kolm_1d, 'b--', label='Kolmogorov')
plt.legend()
plt.xlabel('$\\Delta r$ [meters]')
plt.ylabel('$D_\\Phi$(|$\\Delta r$|) [rad$^2$]')
plt.show()<|docstring|>Generate many different phase screens for a simulation setup and compare the statistical structure function of the
phase screen to theoretical fits
:param input_N: N for the simulation
:param input_dx: sampling at the source plane for the simulation
:param input_cn2: the turbulence of the simulation
:param num_screen_draws: the number of random screens draws for computing statistics
:param input_propdist: the propagation distance of the simulation
:param input_wave: the wavelength in meters for sim
:param input_num_subharmonics: the number of subharmonics for low frequency screen draws
:param input_L0: the outer scale of the simulation
:param input_n_screen_sim: the number of screens used in the simulation for propagating a beam
:return:<|endoftext|> |
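A minimal usage sketch for the record above (illustrative only, not part of the dataset row). It assumes the function is importable from sim_evaluation_module.py as listed in the path field, that the wavepy/numpy/scipy/matplotlib imports used by that module are available, and that every parameter value below is a made-up example:

from sim_evaluation_module import evaluate_phase_structure_function_accuracy

# Hypothetical settings; only the call signature is taken from the record.
evaluate_phase_structure_function_accuracy(
    input_N=512,             # grid points per side
    input_dx=0.005,          # source-plane sampling [m]
    input_cn2=1e-14,         # Cn^2 turbulence strength [m^(-2/3)]
    num_screen_draws=20,     # screens averaged for the structure function
    input_propdist=3000.0,   # propagation distance [m]
    input_wave=1e-06,        # wavelength [m]
    input_num_subharmonics=5,
    input_L0=1000.0,         # outer scale [m]
    input_n_screen_sim=4)    # screens along the propagation path
# Plots the azimuthally averaged structure function of the drawn screens
# against the modified von Karman and Kolmogorov theoretical curves.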
ecacab65c84f9c685e1600a94b708a9939134358119111aae8d83f70132ae00f | def evaluate_phase_structure_function_accuracy_postprocess(report_filename, sim_result_directory):
'\n Evaluate the phase structure function accuracy on a series of phase screens that were generated for the purpose of\n running a turbulence time series simulation\n\n Note: I have never had much confidence in this metric, as the scaling of the phase screens greatly affects the\n accuracy of the results\n\n :param report_filename: the filename including path for the output report of the turbulence simulation\n :param sim_result_directory: the directory in which the turbulence output simulation files over the timesteps have\n been written out\n\n :return: no return at the moment, just display plots\n '
with open(report_filename, 'r') as f:
sim_dict = yaml.safe_load(f)
tsteps = sim_dict['Timesteps']
dx = sim_dict['Source dx']
N = sim_dict['N']
r0 = sim_dict['r0']
L0 = sim_dict['L0']
l0 = sim_dict['l0']
cn2 = float(sim_dict['Cn2'])
wave = float(sim_dict['wavelength'])
propdist = sim_dict['Propagation Dist']
nscreens = sim_dict['Num Screens']
Rdx = sim_dict['Receiver Rdx']
sim = wp.wavepy(N=N, L0=L0, dx=dx, Rdx=Rdx, Cn2=cn2, PropDist=propdist, NumScr=nscreens, W0=5, SideLen=(N * dx), wvl=wave)
r0_scrn_sim = sim.r0scrn
delta_f = (1 / (N * dx))
dzProps = (np.ones((nscreens + 2)) * (propdist / nscreens))
dzProps[0:2] = (0.5 * (propdist / nscreens))
dzProps[nscreens:(nscreens + 2)] = (0.5 * (propdist / nscreens))
PropLocs = np.zeros((nscreens + 3))
for zval in range(0, (nscreens + 2)):
PropLocs[(zval + 1)] = (PropLocs[zval] + dzProps[zval])
ScrnLoc = np.concatenate((PropLocs[1:nscreens], np.array([PropLocs[(nscreens + 1)]])), axis=0)
FracPropDist = (PropLocs / propdist)
dx_phase_screens = (((Rdx - dx) * FracPropDist) + dx)
cutoff_aperture = ((N * dx) / 4)
for nscr in range(0, nscreens):
dx_scr = dx_phase_screens[nscr]
sum_Dr = 0
for t in range(0, tsteps):
fname_scr_draw = (((((sim_result_directory + '_scr') + str(nscr)) + '_t') + '{:04d}'.format(t)) + '.npy')
scr_draw = np.load(fname_scr_draw)
D_bos = sim.StructFunc(scr_draw)
sum_Dr = (sum_Dr + D_bos)
D_r_avg = (sum_Dr / tsteps)
x = np.arange((- int((N / 2))), int((N / 2)))
(X, Y) = np.meshgrid(x, x)
R = np.sqrt((((X * dx_scr) ** 2) + ((Y * dx_scr) ** 2)))
R[(int((N / 2)), int((N / 2)))] = 0
rbin = ((N * R) / R.max()).astype(int)
bin_sampling = ((R.max() * np.arange(0, (rbin.max() + 1))) / rbin.max())
where_within_aperture = (bin_sampling < cutoff_aperture)
bin_sampling_aperture = bin_sampling[(bin_sampling < cutoff_aperture)]
D_avg_1d = scipy.ndimage.maximum(D_r_avg, labels=rbin, index=np.arange(0, (rbin.max() + 1)))
D_avg_1d_cutoff = D_avg_1d[where_within_aperture]
k0 = ((2 * np.pi) / L0)
l0 = 0.001
D_mVK_2d = ((((7.75 * (r0_scrn_sim ** ((- 5.0) / 3.0))) * (l0 ** ((- 1.0) / 3.0))) * (R ** 2.0)) * ((1 / ((1 + ((2.03 * (R ** 2.0)) / (l0 ** 2.0))) ** (1.0 / 6.0))) - (0.72 * ((k0 * l0) ** (1.0 / 3.0)))))
D_Kolm_2d = (6.88 * ((R / r0_scrn_sim) ** (5 / 3)))
D_mVK_1d = ((((7.75 * (r0_scrn_sim ** ((- 5.0) / 3.0))) * (l0 ** ((- 1.0) / 3.0))) * (bin_sampling_aperture ** 2.0)) * ((1 / ((1 + ((2.03 * (bin_sampling_aperture ** 2.0)) / (l0 ** 2.0))) ** (1.0 / 6.0))) - (0.72 * ((k0 * l0) ** (1.0 / 3.0)))))
D_Kolm_1d = (6.88 * ((bin_sampling_aperture / r0_scrn_sim) ** (5 / 3)))
plt.plot(bin_sampling_aperture, D_avg_1d_cutoff, 'r', label='Simulation')
plt.plot(bin_sampling_aperture, D_mVK_1d, 'k--', label='MvK')
plt.plot(bin_sampling_aperture, D_Kolm_1d, 'b--', label='Kolmogorov')
plt.legend()
plt.xlabel('$\\Delta r$ [meters]')
plt.ylabel('$D_\\Phi$(|$\\Delta r$|) [rad$^2$]')
plt.show() | Evaluate the phase structure function accuracy on a series of phase screens that were generated for the purpose of
running a turbulence time series simulation
Note: I have never had much confidence in this metric, as the scaling of the phase screens greatly affects the
accuracy of the results
:param report_filename: the filename including path for the output report of the turbulence simulation
:param sim_result_directory: the directory in which the turbulence output simulation files over the timesteps have
been written out
:return: no return at the moment, just display plots | sim_evaluation_module.py | evaluate_phase_structure_function_accuracy_postprocess | gregbad/WavePy | 0 | python | def evaluate_phase_structure_function_accuracy_postprocess(report_filename, sim_result_directory):
'\n Evaluate the phase structure function accuracy on a series of phase screens that were generated for the purpose of\n running a turbulence time series simulation\n\n Note: I have never had much confidence in this metric, as the scaling of the phase screens greatly affects the\n accuracy of the results\n\n :param report_filename: the filename including path for the output report of the turbulence simulation\n :param sim_result_directory: the directory in which the turbulence output simulation files over the timesteps have\n been written out\n\n :return: no return at the moment, just display plots\n '
with open(report_filename, 'r') as f:
sim_dict = yaml.safe_load(f)
tsteps = sim_dict['Timesteps']
dx = sim_dict['Source dx']
N = sim_dict['N']
r0 = sim_dict['r0']
L0 = sim_dict['L0']
l0 = sim_dict['l0']
cn2 = float(sim_dict['Cn2'])
wave = float(sim_dict['wavelength'])
propdist = sim_dict['Propagation Dist']
nscreens = sim_dict['Num Screens']
Rdx = sim_dict['Receiver Rdx']
sim = wp.wavepy(N=N, L0=L0, dx=dx, Rdx=Rdx, Cn2=cn2, PropDist=propdist, NumScr=nscreens, W0=5, SideLen=(N * dx), wvl=wave)
r0_scrn_sim = sim.r0scrn
delta_f = (1 / (N * dx))
dzProps = (np.ones((nscreens + 2)) * (propdist / nscreens))
dzProps[0:2] = (0.5 * (propdist / nscreens))
dzProps[nscreens:(nscreens + 2)] = (0.5 * (propdist / nscreens))
PropLocs = np.zeros((nscreens + 3))
for zval in range(0, (nscreens + 2)):
PropLocs[(zval + 1)] = (PropLocs[zval] + dzProps[zval])
ScrnLoc = np.concatenate((PropLocs[1:nscreens], np.array([PropLocs[(nscreens + 1)]])), axis=0)
FracPropDist = (PropLocs / propdist)
dx_phase_screens = (((Rdx - dx) * FracPropDist) + dx)
cutoff_aperture = ((N * dx) / 4)
for nscr in range(0, nscreens):
dx_scr = dx_phase_screens[nscr]
sum_Dr = 0
for t in range(0, tsteps):
fname_scr_draw = (((((sim_result_directory + '_scr') + str(nscr)) + '_t') + '{:04d}'.format(t)) + '.npy')
scr_draw = np.load(fname_scr_draw)
D_bos = sim.StructFunc(scr_draw)
sum_Dr = (sum_Dr + D_bos)
D_r_avg = (sum_Dr / tsteps)
x = np.arange((- int((N / 2))), int((N / 2)))
(X, Y) = np.meshgrid(x, x)
R = np.sqrt((((X * dx_scr) ** 2) + ((Y * dx_scr) ** 2)))
R[(int((N / 2)), int((N / 2)))] = 0
rbin = ((N * R) / R.max()).astype(int)
bin_sampling = ((R.max() * np.arange(0, (rbin.max() + 1))) / rbin.max())
where_within_aperture = (bin_sampling < cutoff_aperture)
bin_sampling_aperture = bin_sampling[(bin_sampling < cutoff_aperture)]
D_avg_1d = scipy.ndimage.maximum(D_r_avg, labels=rbin, index=np.arange(0, (rbin.max() + 1)))
D_avg_1d_cutoff = D_avg_1d[where_within_aperture]
k0 = ((2 * np.pi) / L0)
l0 = 0.001
D_mVK_2d = ((((7.75 * (r0_scrn_sim ** ((- 5.0) / 3.0))) * (l0 ** ((- 1.0) / 3.0))) * (R ** 2.0)) * ((1 / ((1 + ((2.03 * (R ** 2.0)) / (l0 ** 2.0))) ** (1.0 / 6.0))) - (0.72 * ((k0 * l0) ** (1.0 / 3.0)))))
D_Kolm_2d = (6.88 * ((R / r0_scrn_sim) ** (5 / 3)))
D_mVK_1d = ((((7.75 * (r0_scrn_sim ** ((- 5.0) / 3.0))) * (l0 ** ((- 1.0) / 3.0))) * (bin_sampling_aperture ** 2.0)) * ((1 / ((1 + ((2.03 * (bin_sampling_aperture ** 2.0)) / (l0 ** 2.0))) ** (1.0 / 6.0))) - (0.72 * ((k0 * l0) ** (1.0 / 3.0)))))
D_Kolm_1d = (6.88 * ((bin_sampling_aperture / r0_scrn_sim) ** (5 / 3)))
plt.plot(bin_sampling_aperture, D_avg_1d_cutoff, 'r', label='Simulation')
plt.plot(bin_sampling_aperture, D_mVK_1d, 'k--', label='MvK')
plt.plot(bin_sampling_aperture, D_Kolm_1d, 'b--', label='Kolmogorov')
plt.legend()
plt.xlabel('$\\Delta r$ [meters]')
plt.ylabel('$D_\\Phi$(|$\\Delta r$|) [rad$^2$]')
plt.show() | def evaluate_phase_structure_function_accuracy_postprocess(report_filename, sim_result_directory):
'\n Evaluate the phase structure function accuracy on a series of phase screens that were generated for the purpose of\n running a turbulence time series simulation\n\n Note: I have never had much confidence in this metric, as the scaling of the phase screens greatly affects the\n accuracy of the results\n\n :param report_filename: the filename including path for the output report of the turbulence simulation\n :param sim_result_directory: the directory in which the turbulence output simulation files over the timesteps have\n been written out\n\n :return: no return at the moment, just display plots\n '
with open(report_filename, 'r') as f:
sim_dict = yaml.safe_load(f)
tsteps = sim_dict['Timesteps']
dx = sim_dict['Source dx']
N = sim_dict['N']
r0 = sim_dict['r0']
L0 = sim_dict['L0']
l0 = sim_dict['l0']
cn2 = float(sim_dict['Cn2'])
wave = float(sim_dict['wavelength'])
propdist = sim_dict['Propagation Dist']
nscreens = sim_dict['Num Screens']
Rdx = sim_dict['Receiver Rdx']
sim = wp.wavepy(N=N, L0=L0, dx=dx, Rdx=Rdx, Cn2=cn2, PropDist=propdist, NumScr=nscreens, W0=5, SideLen=(N * dx), wvl=wave)
r0_scrn_sim = sim.r0scrn
delta_f = (1 / (N * dx))
dzProps = (np.ones((nscreens + 2)) * (propdist / nscreens))
dzProps[0:2] = (0.5 * (propdist / nscreens))
dzProps[nscreens:(nscreens + 2)] = (0.5 * (propdist / nscreens))
PropLocs = np.zeros((nscreens + 3))
for zval in range(0, (nscreens + 2)):
PropLocs[(zval + 1)] = (PropLocs[zval] + dzProps[zval])
ScrnLoc = np.concatenate((PropLocs[1:nscreens], np.array([PropLocs[(nscreens + 1)]])), axis=0)
FracPropDist = (PropLocs / propdist)
dx_phase_screens = (((Rdx - dx) * FracPropDist) + dx)
cutoff_aperture = ((N * dx) / 4)
for nscr in range(0, nscreens):
dx_scr = dx_phase_screens[nscr]
sum_Dr = 0
for t in range(0, tsteps):
fname_scr_draw = (((((sim_result_directory + '_scr') + str(nscr)) + '_t') + '{:04d}'.format(t)) + '.npy')
scr_draw = np.load(fname_scr_draw)
D_bos = sim.StructFunc(scr_draw)
sum_Dr = (sum_Dr + D_bos)
D_r_avg = (sum_Dr / tsteps)
x = np.arange((- int((N / 2))), int((N / 2)))
(X, Y) = np.meshgrid(x, x)
R = np.sqrt((((X * dx_scr) ** 2) + ((Y * dx_scr) ** 2)))
R[(int((N / 2)), int((N / 2)))] = 0
rbin = ((N * R) / R.max()).astype(int)
bin_sampling = ((R.max() * np.arange(0, (rbin.max() + 1))) / rbin.max())
where_within_aperture = (bin_sampling < cutoff_aperture)
bin_sampling_aperture = bin_sampling[(bin_sampling < cutoff_aperture)]
D_avg_1d = scipy.ndimage.maximum(D_r_avg, labels=rbin, index=np.arange(0, (rbin.max() + 1)))
D_avg_1d_cutoff = D_avg_1d[where_within_aperture]
k0 = ((2 * np.pi) / L0)
l0 = 0.001
D_mVK_2d = ((((7.75 * (r0_scrn_sim ** ((- 5.0) / 3.0))) * (l0 ** ((- 1.0) / 3.0))) * (R ** 2.0)) * ((1 / ((1 + ((2.03 * (R ** 2.0)) / (l0 ** 2.0))) ** (1.0 / 6.0))) - (0.72 * ((k0 * l0) ** (1.0 / 3.0)))))
D_Kolm_2d = (6.88 * ((R / r0_scrn_sim) ** (5 / 3)))
D_mVK_1d = ((((7.75 * (r0_scrn_sim ** ((- 5.0) / 3.0))) * (l0 ** ((- 1.0) / 3.0))) * (bin_sampling_aperture ** 2.0)) * ((1 / ((1 + ((2.03 * (bin_sampling_aperture ** 2.0)) / (l0 ** 2.0))) ** (1.0 / 6.0))) - (0.72 * ((k0 * l0) ** (1.0 / 3.0)))))
D_Kolm_1d = (6.88 * ((bin_sampling_aperture / r0_scrn_sim) ** (5 / 3)))
plt.plot(bin_sampling_aperture, D_avg_1d_cutoff, 'r', label='Simulation')
plt.plot(bin_sampling_aperture, D_mVK_1d, 'k--', label='MvK')
plt.plot(bin_sampling_aperture, D_Kolm_1d, 'b--', label='Kolmogorov')
plt.legend()
plt.xlabel('$\\Delta r$ [meters]')
plt.ylabel('$D_\\Phi$(|$\\Delta r$|) [rad$^2$]')
plt.show()<|docstring|>Evaluate the phase structure function accuracy on a series of phase screens that were generated for the purpose of
running a turbulence time series simulation
Note: I have never had much confidence in this metric, as the scaling of the phase screens greatly affects the
accuracy of the results
:param report_filename: the filename including path for the output report of the turbulence simulation
:param sim_result_directory: the directory in which the turbulence output simulation files over the timesteps have
been written out
:return: no return at the moment, just display plots<|endoftext|> |
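A hedged sketch of driving the post-processing variant above (not part of the dataset row). The report path and screen-file prefix are placeholders; the function expects screen files named <prefix>_scr<n>_t<tttt>.npy, matching the pattern built inside its loop:

from sim_evaluation_module import evaluate_phase_structure_function_accuracy_postprocess

# Hypothetical locations produced by an earlier WavePy time-series run.
report_file = 'results/turb_sim_report.yaml'   # YAML report read at the top of the function
screen_prefix = 'results/turb_sim'             # prefix passed as sim_result_directory
evaluate_phase_structure_function_accuracy_postprocess(report_file, screen_prefix)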
aa1538f259affa829662a9a4cfc8d9ca62fa873cd48a07f7d7c2365b77205d28 | def evaluate_PSD_accuracy(report_filename, sim_result_directory, min_max_freq=[1, 100], nsamples_psd=250):
'\n Evaluate the PSD of a series of phase screens from a turbulence evolved simulation\n\n :param report_filename: the filename including path for the output report of the turbulence simulation\n :param sim_result_directory: the directory in which the turbulence output simulation files over the timesteps have\n been written out\n :param min_max_freq: the minimum and maximum frequencies to include in the output plots\n :param nsamples_psd: the number of samples over which to bin when averaging the radial PSD\n :return: no return at the moment, just display plots\n '
with open(report_filename, 'r') as f:
sim_dict = yaml.safe_load(f)
tsteps = sim_dict['Timesteps']
dx = sim_dict['Source dx']
N = sim_dict['N']
r0 = sim_dict['r0']
L0 = sim_dict['L0']
l0 = sim_dict['l0']
cn2 = float(sim_dict['Cn2'])
wave = float(sim_dict['wavelength'])
propdist = sim_dict['Propagation Dist']
nscreens = sim_dict['Num Screens']
a = int((N / 2))
nx = np.arange(0, a)
deltaf = (1 / (N * dx))
k = ((2 * np.pi) / wave)
r0scrn = ((((0.423 * (k ** 2)) * cn2) * (propdist / nscreens)) ** ((- 3.0) / 5.0))
x = np.arange((- int((N / 2))), int((N / 2)))
(X, Y) = np.meshgrid(x, x)
R = np.sqrt((((((X * 2) * np.pi) * deltaf) ** 2) + ((((Y * 2) * np.pi) * deltaf) ** 2)))
R[(int((N / 2)), int((N / 2)))] = 0
rbin = ((nsamples_psd * R) / R.max()).astype(int)
bin_sampling = ((R.max() * np.arange(0, (rbin.max() + 1))) / rbin.max())
f_sampling = ((bin_sampling / 2.0) / np.pi)
min_freq = min_max_freq[0]
max_freq = min_max_freq[1]
freq_bound_indices = np.logical_and((f_sampling > min_freq), (f_sampling < max_freq))
bin_sampling_bounds = bin_sampling[freq_bound_indices]
for scr_idx in range(0, nscreens):
(fig, ax) = plt.subplots(figsize=(7, 5))
for t in range(0, tsteps):
fname_scr_draw = (((((sim_result_directory + '_scr') + str(scr_idx)) + '_t') + '{:04d}'.format(t)) + '.npy')
scr_draw = np.load(fname_scr_draw)
h = np.hamming(N)
ham2d = np.outer(h, h)
noise_gain = (np.sum((ham2d ** 2.0)) / (N ** 2))
coherent_gain = (np.sum(ham2d) / (N ** 2))
fbin = (1 / (N * dx))
scale_factor = ((noise_gain * (fbin ** 2.0)) / (coherent_gain ** 3.0))
fft_ph = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(((scr_draw * ham2d) / (N ** 2)))))
PSD_emp_2 = ((np.abs(fft_ph) ** 2.0) / scale_factor)
PSD_avg_2 = scipy.ndimage.mean(PSD_emp_2, labels=rbin, index=np.arange(0, (rbin.max() + 1)))
PSD_avg_2_bounds = PSD_avg_2[freq_bound_indices]
ax.scatter(((bin_sampling_bounds / 2) / np.pi), PSD_avg_2_bounds, c='b', alpha=0.2)
K0 = ((2 * np.pi) / L0)
PSD_theor = ((0.49 * (r0scrn ** ((- 5) / 3))) * (((bin_sampling_bounds ** 2) + (K0 ** 2.0)) ** ((- 11) / 6)))
ax.plot(((bin_sampling_bounds / 2) / np.pi), PSD_theor, 'red', label='theory')
ax.set_xlim([min_freq, max_freq])
ax.set_yscale('log')
ax.set_xscale('log')
ax.legend()
ax.set_title(('Screen #' + str(scr_idx)))
ax.set_xlabel('spatial frequency [1/m]')
ax.set_ylabel('PSD(f)')
plt.show() | Evaluate the PSD of a series of phase screens from a turbulence evolved simulation
:param report_filename: the filename including path for the output report of the turbulence simulation
:param sim_result_directory: the directory in which the turbulence output simulation files over the timesteps have
been written out
:param min_max_freq: the minimum and maximum frequencies to include in the output plots
:param nsamples_psd: the number of samples over which to bin when averaging the radial PSD
:return: no return at the moment, just display plots | sim_evaluation_module.py | evaluate_PSD_accuracy | gregbad/WavePy | 0 | python | def evaluate_PSD_accuracy(report_filename, sim_result_directory, min_max_freq=[1, 100], nsamples_psd=250):
'\n Evaluate the PSD of a series of phase screens from a turbulence evolved simulation\n\n :param report_filename: the filename including path for the output report of the turbulence simulation\n :param sim_result_directory: the directory in which the turbulence output simulation files over the timesteps have\n been written out\n :param min_max_freq: the minimum and maximum frequencies to include in the output plots\n :param nsamples_psd: the number of samples over which to bin when averaging the radial PSD\n :return: no return at the moment, just display plots\n '
with open(report_filename, 'r') as f:
sim_dict = yaml.safe_load(f)
tsteps = sim_dict['Timesteps']
dx = sim_dict['Source dx']
N = sim_dict['N']
r0 = sim_dict['r0']
L0 = sim_dict['L0']
l0 = sim_dict['l0']
cn2 = float(sim_dict['Cn2'])
wave = float(sim_dict['wavelength'])
propdist = sim_dict['Propagation Dist']
nscreens = sim_dict['Num Screens']
a = int((N / 2))
nx = np.arange(0, a)
deltaf = (1 / (N * dx))
k = ((2 * np.pi) / wave)
r0scrn = ((((0.423 * (k ** 2)) * cn2) * (propdist / nscreens)) ** ((- 3.0) / 5.0))
x = np.arange((- int((N / 2))), int((N / 2)))
(X, Y) = np.meshgrid(x, x)
R = np.sqrt((((((X * 2) * np.pi) * deltaf) ** 2) + ((((Y * 2) * np.pi) * deltaf) ** 2)))
R[(int((N / 2)), int((N / 2)))] = 0
rbin = ((nsamples_psd * R) / R.max()).astype(int)
bin_sampling = ((R.max() * np.arange(0, (rbin.max() + 1))) / rbin.max())
f_sampling = ((bin_sampling / 2.0) / np.pi)
min_freq = min_max_freq[0]
max_freq = min_max_freq[1]
freq_bound_indices = np.logical_and((f_sampling > min_freq), (f_sampling < max_freq))
bin_sampling_bounds = bin_sampling[freq_bound_indices]
for scr_idx in range(0, nscreens):
(fig, ax) = plt.subplots(figsize=(7, 5))
for t in range(0, tsteps):
fname_scr_draw = (((((sim_result_directory + '_scr') + str(scr_idx)) + '_t') + '{:04d}'.format(t)) + '.npy')
scr_draw = np.load(fname_scr_draw)
h = np.hamming(N)
ham2d = np.outer(h, h)
noise_gain = (np.sum((ham2d ** 2.0)) / (N ** 2))
coherent_gain = (np.sum(ham2d) / (N ** 2))
fbin = (1 / (N * dx))
scale_factor = ((noise_gain * (fbin ** 2.0)) / (coherent_gain ** 3.0))
fft_ph = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(((scr_draw * ham2d) / (N ** 2)))))
PSD_emp_2 = ((np.abs(fft_ph) ** 2.0) / scale_factor)
PSD_avg_2 = scipy.ndimage.mean(PSD_emp_2, labels=rbin, index=np.arange(0, (rbin.max() + 1)))
PSD_avg_2_bounds = PSD_avg_2[freq_bound_indices]
ax.scatter(((bin_sampling_bounds / 2) / np.pi), PSD_avg_2_bounds, c='b', alpha=0.2)
K0 = ((2 * np.pi) / L0)
PSD_theor = ((0.49 * (r0scrn ** ((- 5) / 3))) * (((bin_sampling_bounds ** 2) + (K0 ** 2.0)) ** ((- 11) / 6)))
ax.plot(((bin_sampling_bounds / 2) / np.pi), PSD_theor, 'red', label='theory')
ax.set_xlim([min_freq, max_freq])
ax.set_yscale('log')
ax.set_xscale('log')
ax.legend()
ax.set_title(('Screen #' + str(scr_idx)))
ax.set_xlabel('spatial frequency [1/m]')
ax.set_ylabel('PSD(f)')
plt.show() | def evaluate_PSD_accuracy(report_filename, sim_result_directory, min_max_freq=[1, 100], nsamples_psd=250):
'\n Evaluate the PSD of a series of phase screens from a turbulence evolved simulation\n\n :param report_filename: the filename including path for the output report of the turbulence simulation\n :param sim_result_directory: the directory in which the turbulence output simulation files over the timesteps have\n been written out\n :param min_max_freq: the minimum and maximum frequencies to include in the output plots\n :param nsamples_psd: the number of samples over which to bin when averaging the radial PSD\n :return: no return at the moment, just display plots\n '
with open(report_filename, 'r') as f:
sim_dict = yaml.safe_load(f)
tsteps = sim_dict['Timesteps']
dx = sim_dict['Source dx']
N = sim_dict['N']
r0 = sim_dict['r0']
L0 = sim_dict['L0']
l0 = sim_dict['l0']
cn2 = float(sim_dict['Cn2'])
wave = float(sim_dict['wavelength'])
propdist = sim_dict['Propagation Dist']
nscreens = sim_dict['Num Screens']
a = int((N / 2))
nx = np.arange(0, a)
deltaf = (1 / (N * dx))
k = ((2 * np.pi) / wave)
r0scrn = ((((0.423 * (k ** 2)) * cn2) * (propdist / nscreens)) ** ((- 3.0) / 5.0))
x = np.arange((- int((N / 2))), int((N / 2)))
(X, Y) = np.meshgrid(x, x)
R = np.sqrt((((((X * 2) * np.pi) * deltaf) ** 2) + ((((Y * 2) * np.pi) * deltaf) ** 2)))
R[(int((N / 2)), int((N / 2)))] = 0
rbin = ((nsamples_psd * R) / R.max()).astype(int)
bin_sampling = ((R.max() * np.arange(0, (rbin.max() + 1))) / rbin.max())
f_sampling = ((bin_sampling / 2.0) / np.pi)
min_freq = min_max_freq[0]
max_freq = min_max_freq[1]
freq_bound_indices = np.logical_and((f_sampling > min_freq), (f_sampling < max_freq))
bin_sampling_bounds = bin_sampling[freq_bound_indices]
for scr_idx in range(0, nscreens):
(fig, ax) = plt.subplots(figsize=(7, 5))
for t in range(0, tsteps):
fname_scr_draw = (((((sim_result_directory + '_scr') + str(scr_idx)) + '_t') + '{:04d}'.format(t)) + '.npy')
scr_draw = np.load(fname_scr_draw)
h = np.hamming(N)
ham2d = np.outer(h, h)
noise_gain = (np.sum((ham2d ** 2.0)) / (N ** 2))
coherent_gain = (np.sum(ham2d) / (N ** 2))
fbin = (1 / (N * dx))
scale_factor = ((noise_gain * (fbin ** 2.0)) / (coherent_gain ** 3.0))
fft_ph = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(((scr_draw * ham2d) / (N ** 2)))))
PSD_emp_2 = ((np.abs(fft_ph) ** 2.0) / scale_factor)
PSD_avg_2 = scipy.ndimage.mean(PSD_emp_2, labels=rbin, index=np.arange(0, (rbin.max() + 1)))
PSD_avg_2_bounds = PSD_avg_2[freq_bound_indices]
ax.scatter(((bin_sampling_bounds / 2) / np.pi), PSD_avg_2_bounds, c='b', alpha=0.2)
K0 = ((2 * np.pi) / L0)
PSD_theor = ((0.49 * (r0scrn ** ((- 5) / 3))) * (((bin_sampling_bounds ** 2) + (K0 ** 2.0)) ** ((- 11) / 6)))
ax.plot(((bin_sampling_bounds / 2) / np.pi), PSD_theor, 'red', label='theory')
ax.set_xlim([min_freq, max_freq])
ax.set_yscale('log')
ax.set_xscale('log')
ax.legend()
ax.set_title(('Screen #' + str(scr_idx)))
ax.set_xlabel('spatial frequency [1/m]')
ax.set_ylabel('PSD(f)')
plt.show()<|docstring|>Evaluate the PSD of a series of phase screens from a turbulence evolved simulation
:param report_filename: the filename including path for the output report of the turbulence simulation
:param sim_result_directory: the directory in which the turbulence output simulation files over the timesteps have
been written out
:param min_max_freq: the minimum and maximum frequencies to include in the output plots
:param nsamples_psd: the number of samples over which to bin when averaging the radial PSD
:return: no return at the moment, just display plots<|endoftext|> |
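A similar illustrative call for the PSD check above; the file locations are assumptions, and the frequency window and bin count simply restate the defaults from the signature:

from sim_evaluation_module import evaluate_PSD_accuracy

evaluate_PSD_accuracy(
    report_filename='results/turb_sim_report.yaml',
    sim_result_directory='results/turb_sim',
    min_max_freq=[1, 100],   # spatial-frequency window [1/m] shown in the plots
    nsamples_psd=250)        # radial bins used when averaging the 2-D PSD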
29a3eff471c0e6b16b2b27e6bee7a82058707b2d4375be530a997564bfeb9606 | @pytest.mark.serial
def test_delete_zone_success(shared_zone_test_context):
'\n Test deleting a zone\n '
client = shared_zone_test_context.ok_vinyldns_client
result_zone = None
try:
zone_name = f'one-time{shared_zone_test_context.partition_id}'
zone = {'name': zone_name, 'email': '[email protected]', 'adminGroupId': shared_zone_test_context.ok_group['id'], 'connection': {'name': 'vinyldns.', 'keyName': VinylDNSTestContext.dns_key_name, 'key': VinylDNSTestContext.dns_key, 'primaryServer': VinylDNSTestContext.name_server_ip}, 'transferConnection': {'name': 'vinyldns.', 'keyName': VinylDNSTestContext.dns_key_name, 'key': VinylDNSTestContext.dns_key, 'primaryServer': VinylDNSTestContext.name_server_ip}}
result = client.create_zone(zone, status=202)
result_zone = result['zone']
client.wait_until_zone_active(result_zone['id'])
client.delete_zone(result_zone['id'], status=202)
client.wait_until_zone_deleted(result_zone['id'])
client.get_zone(result_zone['id'], status=404)
result_zone = None
finally:
if result_zone:
client.abandon_zones([result_zone['id']], status=202) | Test deleting a zone | modules/api/src/test/functional/tests/zones/delete_zone_test.py | test_delete_zone_success | Jay07GIT/vinyldns | 0 | python | @pytest.mark.serial
def test_delete_zone_success(shared_zone_test_context):
'\n \n '
client = shared_zone_test_context.ok_vinyldns_client
result_zone = None
try:
zone_name = f'one-time{shared_zone_test_context.partition_id}'
zone = {'name': zone_name, 'email': '[email protected]', 'adminGroupId': shared_zone_test_context.ok_group['id'], 'connection': {'name': 'vinyldns.', 'keyName': VinylDNSTestContext.dns_key_name, 'key': VinylDNSTestContext.dns_key, 'primaryServer': VinylDNSTestContext.name_server_ip}, 'transferConnection': {'name': 'vinyldns.', 'keyName': VinylDNSTestContext.dns_key_name, 'key': VinylDNSTestContext.dns_key, 'primaryServer': VinylDNSTestContext.name_server_ip}}
result = client.create_zone(zone, status=202)
result_zone = result['zone']
client.wait_until_zone_active(result_zone['id'])
client.delete_zone(result_zone['id'], status=202)
client.wait_until_zone_deleted(result_zone['id'])
client.get_zone(result_zone['id'], status=404)
result_zone = None
finally:
if result_zone:
client.abandon_zones([result_zone['id']], status=202) | @pytest.mark.serial
def test_delete_zone_success(shared_zone_test_context):
'\n \n '
client = shared_zone_test_context.ok_vinyldns_client
result_zone = None
try:
zone_name = f'one-time{shared_zone_test_context.partition_id}'
zone = {'name': zone_name, 'email': '[email protected]', 'adminGroupId': shared_zone_test_context.ok_group['id'], 'connection': {'name': 'vinyldns.', 'keyName': VinylDNSTestContext.dns_key_name, 'key': VinylDNSTestContext.dns_key, 'primaryServer': VinylDNSTestContext.name_server_ip}, 'transferConnection': {'name': 'vinyldns.', 'keyName': VinylDNSTestContext.dns_key_name, 'key': VinylDNSTestContext.dns_key, 'primaryServer': VinylDNSTestContext.name_server_ip}}
result = client.create_zone(zone, status=202)
result_zone = result['zone']
client.wait_until_zone_active(result_zone['id'])
client.delete_zone(result_zone['id'], status=202)
client.wait_until_zone_deleted(result_zone['id'])
client.get_zone(result_zone['id'], status=404)
result_zone = None
finally:
if result_zone:
client.abandon_zones([result_zone['id']], status=202)<|docstring|>Test deleting a zone<|endoftext|> |
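A sketch of running just this test through pytest's Python API (an assumption about the workflow, not taken from the repository docs); it presumes the VinylDNS functional-test environment behind shared_zone_test_context is already running:

import pytest

# Node id comes from the record's path and name fields; -m honours the serial marker.
pytest.main([
    'modules/api/src/test/functional/tests/zones/delete_zone_test.py::test_delete_zone_success',
    '-m', 'serial'])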