filename | text
---|---|
the-stack_0_15005 | """Basic tests for quadrature GPy wrappers."""
import GPy
import numpy as np
import pytest
from pytest_lazyfixture import lazy_fixture
from emukit.model_wrappers.gpy_quadrature_wrappers import (
BaseGaussianProcessGPy,
BrownianGPy,
ProductBrownianGPy,
ProductMatern32GPy,
ProductMatern52GPy,
RBFGPy,
create_emukit_model_from_gpy_model,
)
from emukit.quadrature.kernels import (
QuadratureBrownianLebesgueMeasure,
QuadratureProductBrownianLebesgueMeasure,
QuadratureProductMatern32LebesgueMeasure,
QuadratureProductMatern52LebesgueMeasure,
QuadratureRBFGaussianMeasure,
QuadratureRBFLebesgueMeasure,
)
from emukit.quadrature.measures import GaussianMeasure, LebesgueMeasure
def get_prod_kernel(kernel_type, n_dim):
k = kernel_type(input_dim=1, active_dims=[0])
for i in range(1, n_dim):
k = k * kernel_type(input_dim=1, active_dims=[i])
return k
def data(n_dim: int):
return np.ones([3, n_dim]), np.ones([3, 1])
def integral_bounds(n_dim: int):
return n_dim * [(0, 1)]
def measure_lebesgue(n_dim: int):
return LebesgueMeasure.from_bounds(bounds=n_dim * [(0, 1)])
def measure_gaussian(n_dim: int):
return GaussianMeasure(mean=np.ones(n_dim), variance=1.0)
# === dimension fixtures start here
@pytest.fixture
def dim2():
return 2
@pytest.fixture
def dim1():
return 1
# === 1D GPy kernel fixtures start here
@pytest.fixture
def gpy_brownian(dim1):
kernel_type = GPy.kern.Brownian
return kernel_type(input_dim=dim1), kernel_type, False
@pytest.fixture
def gpy_matern32(dim1):
kernel_type = GPy.kern.Matern32
return kernel_type(input_dim=dim1), kernel_type, False
@pytest.fixture
def gpy_matern52(dim1):
kernel_type = GPy.kern.Matern52
return kernel_type(input_dim=dim1), kernel_type, False
# === 2D GPy kernel fixtures start here
@pytest.fixture
def gpy_rbf(dim2):
kernel_type = GPy.kern.RBF
return kernel_type(input_dim=dim2), kernel_type, False
@pytest.fixture
def gpy_prodbrownian(dim2):
kernel_type = GPy.kern.Brownian
return get_prod_kernel(kernel_type, dim2), kernel_type, True
@pytest.fixture
def gpy_prodmatern32(dim2):
kernel_type = GPy.kern.Matern32
return get_prod_kernel(kernel_type, dim2), kernel_type, True
@pytest.fixture
def gpy_prodmatern52(dim2):
kernel_type = GPy.kern.Matern52
return get_prod_kernel(kernel_type, dim2), kernel_type, True
def get_wrapper_dict(n_dim, measure, gpy_kern, gpy_kernel_wrapper_type, emukit_qkernel_type):
gpy_kernel, gpy_kernel_type, is_prod = gpy_kern
return {
"data": data(n_dim),
"measure": measure(n_dim),
"gpy_kernel": gpy_kernel,
"gpy_kernel_type": gpy_kernel_type,
"is_prod": is_prod,
"gpy_kernel_wrapper_type": gpy_kernel_wrapper_type,
"emukit_qkernel_type": emukit_qkernel_type,
}
# === RBF wrapper test cases
@pytest.fixture
def wrapper_rbf_1(dim2, gpy_rbf):
return get_wrapper_dict(dim2, measure_lebesgue, gpy_rbf, RBFGPy, QuadratureRBFLebesgueMeasure)
@pytest.fixture
def wrapper_rbf_2(dim2, gpy_rbf):
return get_wrapper_dict(dim2, measure_gaussian, gpy_rbf, RBFGPy, QuadratureRBFGaussianMeasure)
# === (product) Brownian wrapper test cases
@pytest.fixture
def wrapper_brownian_1(dim1, gpy_brownian):
return get_wrapper_dict(dim1, measure_lebesgue, gpy_brownian, BrownianGPy, QuadratureBrownianLebesgueMeasure)
@pytest.fixture
def wrapper_brownian_2(dim2, gpy_prodbrownian):
return get_wrapper_dict(
dim2, measure_lebesgue, gpy_prodbrownian, ProductBrownianGPy, QuadratureProductBrownianLebesgueMeasure
)
# === Product Matern32 wrapper test cases
@pytest.fixture
def wrapper_matern32_1(dim2, gpy_prodmatern32):
return get_wrapper_dict(
dim2, measure_lebesgue, gpy_prodmatern32, ProductMatern32GPy, QuadratureProductMatern32LebesgueMeasure
)
@pytest.fixture
def wrapper_matern32_2(dim1, gpy_matern32):
return get_wrapper_dict(
dim1, measure_lebesgue, gpy_matern32, ProductMatern32GPy, QuadratureProductMatern32LebesgueMeasure
)
# === Product Matern52 wrapper test cases
@pytest.fixture
def wrapper_matern52_1(dim2, gpy_prodmatern52):
return get_wrapper_dict(
dim2, measure_lebesgue, gpy_prodmatern52, ProductMatern52GPy, QuadratureProductMatern52LebesgueMeasure
)
@pytest.fixture
def wrapper_matern52_2(dim1, gpy_matern52):
return get_wrapper_dict(
dim1, measure_lebesgue, gpy_matern52, ProductMatern52GPy, QuadratureProductMatern52LebesgueMeasure
)
gpy_test_list = [
lazy_fixture("wrapper_rbf_1"),
lazy_fixture("wrapper_rbf_2"),
lazy_fixture("wrapper_brownian_1"),
lazy_fixture("wrapper_brownian_2"),
lazy_fixture("wrapper_matern32_1"),
lazy_fixture("wrapper_matern32_2"),
lazy_fixture("wrapper_matern52_1"),
lazy_fixture("wrapper_matern52_2"),
]
@pytest.mark.parametrize("wrapper", gpy_test_list)
def test_create_emukit_model_from_gpy_model_types(wrapper):
gpy_model = GPy.models.GPRegression(kernel=wrapper["gpy_kernel"], X=wrapper["data"][0], Y=wrapper["data"][1])
emukit_gp = create_emukit_model_from_gpy_model(gpy_model=gpy_model, measure=wrapper["measure"])
assert isinstance(emukit_gp.kern, wrapper["emukit_qkernel_type"])
assert isinstance(emukit_gp.kern.kern, wrapper["gpy_kernel_wrapper_type"])
# product kernel
if wrapper["is_prod"]:
assert isinstance(wrapper["gpy_kernel"], GPy.kern.Prod)
for k in wrapper["gpy_kernel"].parameters:
assert isinstance(k, wrapper["gpy_kernel_type"])
assert k.input_dim == 1
else:
assert isinstance(emukit_gp.gpy_model.kern, wrapper["gpy_kernel_type"])
def test_create_emukit_model_from_gpy_model_raises_warns():
input_dim = 2
gpy_kernel = GPy.kern.RBF(input_dim=input_dim)
gpy_model = GPy.models.GPRegression(kernel=gpy_kernel, X=np.ones([3, input_dim]), Y=np.ones([3, 1]))
bounds = input_dim * [(0, 1)]
measure = LebesgueMeasure.from_bounds(bounds=bounds)
# Neither measure nor bounds given
with pytest.raises(ValueError):
create_emukit_model_from_gpy_model(gpy_model=gpy_model)
# both measure and bounds are given. Bounds will be ignored.
with pytest.warns(UserWarning):
create_emukit_model_from_gpy_model(gpy_model=gpy_model, integral_bounds=bounds, measure=measure)
def test_base_gp_gpy_raises(gpy_prodbrownian):
incompatible_offset = -3
n_dim = 2
dat = data(n_dim=n_dim)
kern = ProductBrownianGPy(variance=1.0, input_dim=n_dim, offset=incompatible_offset)
measure = LebesgueMeasure.from_bounds(bounds=n_dim * [(0, 1)])
qkern = QuadratureProductBrownianLebesgueMeasure(brownian_kernel=kern, measure=measure)
# this GPy model and hence the emukit base_gp wrapper are not compatible with the kernel wrapper
# for offsets other than zero.
gpy_model = GPy.models.GPRegression(kernel=kern.gpy_brownian, X=dat[0], Y=dat[1])
with pytest.raises(ValueError):
BaseGaussianProcessGPy(kern=qkern, gpy_model=gpy_model)
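# Illustrative usage sketch (not part of the original test module): the flow these
# tests exercise is wrapping a plain GPy regression model for Bayesian quadrature.
# Shapes, bounds and kernel hyperparameters below are arbitrary example values.
def _example_wrapper_usage():
    X, Y = np.ones([3, 2]), np.ones([3, 1])
    gpy_model = GPy.models.GPRegression(kernel=GPy.kern.RBF(input_dim=2), X=X, Y=Y)
    measure = LebesgueMeasure.from_bounds(bounds=2 * [(0, 1)])
    emukit_gp = create_emukit_model_from_gpy_model(gpy_model=gpy_model, measure=measure)
    # emukit_gp.kern is the emukit quadrature kernel; emukit_gp.kern.kern wraps the GPy kernel.
    return emukit_gp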
|
the-stack_0_15007 | #!/usr/bin/env python
#===- lib/asan/scripts/asan_symbolize.py -----------------------------------===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
import argparse
import bisect
import getopt
import os
import re
import subprocess
import sys
symbolizers = {}
DEBUG = False
demangle = False
binutils_prefix = None
sysroot_path = None
binary_name_filter = None
fix_filename_patterns = None
logfile = sys.stdin
allow_system_symbolizer = True
force_system_symbolizer = False
# FIXME: merge the code that calls fix_filename().
def fix_filename(file_name):
if fix_filename_patterns:
for path_to_cut in fix_filename_patterns:
file_name = re.sub('.*' + path_to_cut, '', file_name)
file_name = re.sub('.*asan_[a-z_]*.cc:[0-9]*', '_asan_rtl_', file_name)
file_name = re.sub('.*crtstuff.c:0', '???:0', file_name)
return file_name
def sysroot_path_filter(binary_name):
return sysroot_path + binary_name
def is_valid_arch(s):
return s in ["i386", "x86_64", "x86_64h", "arm", "armv6", "armv7", "armv7s",
"armv7k", "arm64", "powerpc64", "powerpc64le", "s390x", "s390"]
def guess_arch(addr):
# Guess which arch we're running. 10 = len('0x') + 8 hex digits.
if len(addr) > 10:
return 'x86_64'
else:
return 'i386'
class Symbolizer(object):
def __init__(self):
pass
def symbolize(self, addr, binary, offset):
"""Symbolize the given address (pair of binary and offset).
    Overridden in subclasses.
Args:
addr: virtual address of an instruction.
binary: path to executable/shared object containing this instruction.
offset: instruction offset in the @binary.
Returns:
list of strings (one string for each inlined frame) describing
the code locations for this instruction (that is, function name, file
name, line and column numbers).
"""
return None
class LLVMSymbolizer(Symbolizer):
def __init__(self, symbolizer_path, default_arch, system, dsym_hints=[]):
super(LLVMSymbolizer, self).__init__()
self.symbolizer_path = symbolizer_path
self.default_arch = default_arch
self.system = system
self.dsym_hints = dsym_hints
self.pipe = self.open_llvm_symbolizer()
def open_llvm_symbolizer(self):
cmd = [self.symbolizer_path,
'--use-symbol-table=true',
'--demangle=%s' % demangle,
'--functions=linkage',
'--inlining=true',
'--default-arch=%s' % self.default_arch]
if self.system == 'Darwin':
for hint in self.dsym_hints:
cmd.append('--dsym-hint=%s' % hint)
if DEBUG:
print(' '.join(cmd))
try:
result = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
bufsize=0,
universal_newlines=True)
except OSError:
result = None
return result
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if not self.pipe:
return None
result = []
try:
symbolizer_input = '"%s" %s' % (binary, offset)
if DEBUG:
print(symbolizer_input)
self.pipe.stdin.write("%s\n" % symbolizer_input)
while True:
function_name = self.pipe.stdout.readline().rstrip()
if not function_name:
break
file_name = self.pipe.stdout.readline().rstrip()
file_name = fix_filename(file_name)
if (not function_name.startswith('??') or
not file_name.startswith('??')):
# Append only non-trivial frames.
result.append('%s in %s %s' % (addr, function_name,
file_name))
except Exception:
result = []
if not result:
result = None
return result
def LLVMSymbolizerFactory(system, default_arch, dsym_hints=[]):
symbolizer_path = os.getenv('LLVM_SYMBOLIZER_PATH')
if not symbolizer_path:
symbolizer_path = os.getenv('ASAN_SYMBOLIZER_PATH')
if not symbolizer_path:
# Assume llvm-symbolizer is in PATH.
symbolizer_path = 'llvm-symbolizer'
return LLVMSymbolizer(symbolizer_path, default_arch, system, dsym_hints)
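# Example of the llvm-symbolizer pipe protocol that LLVMSymbolizer.symbolize relies on
# (illustrative values, not actual tool output):
#   stdin : "/blah/foo.so" 0x11fe45
#   stdout: LogicError()
#           /src/foo.cc:42:7
#           <empty line terminates the list of (possibly inlined) frames>
# Each function-name/location pair becomes one "<addr> in <function> <file:line>" entry.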
class Addr2LineSymbolizer(Symbolizer):
def __init__(self, binary):
super(Addr2LineSymbolizer, self).__init__()
self.binary = binary
self.pipe = self.open_addr2line()
self.output_terminator = -1
def open_addr2line(self):
addr2line_tool = 'addr2line'
if binutils_prefix:
addr2line_tool = binutils_prefix + addr2line_tool
cmd = [addr2line_tool, '-fi']
if demangle:
cmd += ['--demangle']
cmd += ['-e', self.binary]
if DEBUG:
print(' '.join(cmd))
return subprocess.Popen(cmd,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
bufsize=0,
universal_newlines=True)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if self.binary != binary:
return None
lines = []
try:
self.pipe.stdin.write("%s\n" % offset)
self.pipe.stdin.write("%s\n" % self.output_terminator)
is_first_frame = True
while True:
function_name = self.pipe.stdout.readline().rstrip()
file_name = self.pipe.stdout.readline().rstrip()
if is_first_frame:
is_first_frame = False
elif function_name in ['', '??']:
assert file_name == function_name
break
lines.append((function_name, file_name));
except Exception:
lines.append(('??', '??:0'))
return ['%s in %s %s' % (addr, function, fix_filename(file)) for (function, file) in lines]
class UnbufferedLineConverter(object):
"""
Wrap a child process that responds to each line of input with one line of
output. Uses pty to trick the child into providing unbuffered output.
"""
def __init__(self, args, close_stderr=False):
# Local imports so that the script can start on Windows.
import pty
import termios
pid, fd = pty.fork()
if pid == 0:
# We're the child. Transfer control to command.
if close_stderr:
dev_null = os.open('/dev/null', 0)
os.dup2(dev_null, 2)
os.execvp(args[0], args)
else:
# Disable echoing.
attr = termios.tcgetattr(fd)
attr[3] = attr[3] & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, attr)
# Set up a file()-like interface to the child process
self.r = os.fdopen(fd, "r", 1)
self.w = os.fdopen(os.dup(fd), "w", 1)
def convert(self, line):
self.w.write(line + "\n")
return self.readline()
def readline(self):
return self.r.readline().rstrip()
class DarwinSymbolizer(Symbolizer):
def __init__(self, addr, binary, arch):
super(DarwinSymbolizer, self).__init__()
self.binary = binary
self.arch = arch
self.open_atos()
def open_atos(self):
if DEBUG:
print('atos -o %s -arch %s' % (self.binary, self.arch))
cmdline = ['atos', '-o', self.binary, '-arch', self.arch]
self.atos = UnbufferedLineConverter(cmdline, close_stderr=True)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if self.binary != binary:
return None
atos_line = self.atos.convert('0x%x' % int(offset, 16))
while "got symbolicator for" in atos_line:
atos_line = self.atos.readline()
# A well-formed atos response looks like this:
# foo(type1, type2) (in object.name) (filename.cc:80)
    match = re.match(r'^(.*) \(in (.*)\) \((.*:\d*)\)$', atos_line)
if DEBUG:
print('atos_line: ', atos_line)
if match:
function_name = match.group(1)
      function_name = re.sub(r'\(.*?\)', '', function_name)
file_name = fix_filename(match.group(3))
return ['%s in %s %s' % (addr, function_name, file_name)]
else:
return ['%s in %s' % (addr, atos_line)]
# Chain several symbolizers so that if one symbolizer fails, we fall back
# to the next symbolizer in chain.
class ChainSymbolizer(Symbolizer):
def __init__(self, symbolizer_list):
super(ChainSymbolizer, self).__init__()
self.symbolizer_list = symbolizer_list
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
for symbolizer in self.symbolizer_list:
if symbolizer:
result = symbolizer.symbolize(addr, binary, offset)
if result:
return result
return None
def append_symbolizer(self, symbolizer):
self.symbolizer_list.append(symbolizer)
def BreakpadSymbolizerFactory(binary):
suffix = os.getenv('BREAKPAD_SUFFIX')
if suffix:
filename = binary + suffix
if os.access(filename, os.F_OK):
return BreakpadSymbolizer(filename)
return None
def SystemSymbolizerFactory(system, addr, binary, arch):
if system == 'Darwin':
return DarwinSymbolizer(addr, binary, arch)
elif system == 'Linux' or system == 'FreeBSD':
return Addr2LineSymbolizer(binary)
class BreakpadSymbolizer(Symbolizer):
def __init__(self, filename):
super(BreakpadSymbolizer, self).__init__()
self.filename = filename
    lines = open(filename).readlines()
self.files = []
self.symbols = {}
self.address_list = []
self.addresses = {}
# MODULE mac x86_64 A7001116478B33F18FF9BEDE9F615F190 t
fragments = lines[0].rstrip().split()
self.arch = fragments[2]
self.debug_id = fragments[3]
self.binary = ' '.join(fragments[4:])
self.parse_lines(lines[1:])
def parse_lines(self, lines):
cur_function_addr = ''
for line in lines:
fragments = line.split()
if fragments[0] == 'FILE':
assert int(fragments[1]) == len(self.files)
self.files.append(' '.join(fragments[2:]))
elif fragments[0] == 'PUBLIC':
self.symbols[int(fragments[1], 16)] = ' '.join(fragments[3:])
elif fragments[0] in ['CFI', 'STACK']:
pass
elif fragments[0] == 'FUNC':
cur_function_addr = int(fragments[1], 16)
if not cur_function_addr in self.symbols.keys():
self.symbols[cur_function_addr] = ' '.join(fragments[4:])
else:
# Line starting with an address.
addr = int(fragments[0], 16)
self.address_list.append(addr)
# Tuple of symbol address, size, line, file number.
self.addresses[addr] = (cur_function_addr,
int(fragments[1], 16),
int(fragments[2]),
int(fragments[3]))
self.address_list.sort()
def get_sym_file_line(self, addr):
key = None
if addr in self.addresses.keys():
key = addr
else:
index = bisect.bisect_left(self.address_list, addr)
if index == 0:
return None
else:
key = self.address_list[index - 1]
sym_id, size, line_no, file_no = self.addresses[key]
symbol = self.symbols[sym_id]
filename = self.files[file_no]
if addr < key + size:
return symbol, filename, line_no
else:
return None
def symbolize(self, addr, binary, offset):
if self.binary != binary:
return None
res = self.get_sym_file_line(int(offset, 16))
if res:
function_name, file_name, line_no = res
result = ['%s in %s %s:%d' % (
addr, function_name, file_name, line_no)]
print(result)
return result
else:
return None
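# Example of the Breakpad .sym records parsed by BreakpadSymbolizer (illustrative values):
#   FILE 0 /src/foo.cc
#   FUNC 1130 50 0 main
#   1130 16 42 0          <- line record: address, size, line number, file index
#   PUBLIC 2f00 0 _start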
class SymbolizationLoop(object):
def __init__(self, binary_name_filter=None, dsym_hint_producer=None):
if sys.platform == 'win32':
# ASan on Windows uses dbghelp.dll to symbolize in-process, which works
# even in sandboxed processes. Nothing needs to be done here.
self.process_line = self.process_line_echo
else:
# Used by clients who may want to supply a different binary name.
# E.g. in Chrome several binaries may share a single .dSYM.
self.binary_name_filter = binary_name_filter
self.dsym_hint_producer = dsym_hint_producer
self.system = os.uname()[0]
if self.system not in ['Linux', 'Darwin', 'FreeBSD']:
raise Exception('Unknown system')
self.llvm_symbolizers = {}
self.last_llvm_symbolizer = None
self.dsym_hints = set([])
self.frame_no = 0
self.process_line = self.process_line_posix
def symbolize_address(self, addr, binary, offset, arch):
# On non-Darwin (i.e. on platforms without .dSYM debug info) always use
# a single symbolizer binary.
# On Darwin, if the dsym hint producer is present:
# 1. check whether we've seen this binary already; if so,
# use |llvm_symbolizers[binary]|, which has already loaded the debug
# info for this binary (might not be the case for
# |last_llvm_symbolizer|);
# 2. otherwise check if we've seen all the hints for this binary already;
# if so, reuse |last_llvm_symbolizer| which has the full set of hints;
# 3. otherwise create a new symbolizer and pass all currently known
# .dSYM hints to it.
result = None
if not force_system_symbolizer:
if not binary in self.llvm_symbolizers:
use_new_symbolizer = True
if self.system == 'Darwin' and self.dsym_hint_producer:
dsym_hints_for_binary = set(self.dsym_hint_producer(binary))
use_new_symbolizer = bool(dsym_hints_for_binary - self.dsym_hints)
self.dsym_hints |= dsym_hints_for_binary
if self.last_llvm_symbolizer and not use_new_symbolizer:
self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
else:
self.last_llvm_symbolizer = LLVMSymbolizerFactory(
self.system, arch, self.dsym_hints)
self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
# Use the chain of symbolizers:
# Breakpad symbolizer -> LLVM symbolizer -> addr2line/atos
# (fall back to next symbolizer if the previous one fails).
if not binary in symbolizers:
symbolizers[binary] = ChainSymbolizer(
[BreakpadSymbolizerFactory(binary), self.llvm_symbolizers[binary]])
result = symbolizers[binary].symbolize(addr, binary, offset)
else:
symbolizers[binary] = ChainSymbolizer([])
if result is None:
if not allow_system_symbolizer:
raise Exception('Failed to launch or use llvm-symbolizer.')
# Initialize system symbolizer only if other symbolizers failed.
symbolizers[binary].append_symbolizer(
SystemSymbolizerFactory(self.system, addr, binary, arch))
result = symbolizers[binary].symbolize(addr, binary, offset)
# The system symbolizer must produce some result.
assert result
return result
def get_symbolized_lines(self, symbolized_lines):
if not symbolized_lines:
return [self.current_line]
else:
result = []
for symbolized_frame in symbolized_lines:
result.append(' #%s %s' % (str(self.frame_no), symbolized_frame.rstrip()))
self.frame_no += 1
return result
def process_logfile(self):
self.frame_no = 0
for line in logfile:
processed = self.process_line(line)
print('\n'.join(processed))
def process_line_echo(self, line):
return [line.rstrip()]
def process_line_posix(self, line):
self.current_line = line.rstrip()
#0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45)
    stack_trace_line_format = (
        r'^( *#([0-9]+) *)(0x[0-9a-f]+) *\((.*)\+(0x[0-9a-f]+)\)')
match = re.match(stack_trace_line_format, line)
if not match:
return [self.current_line]
if DEBUG:
print(line)
_, frameno_str, addr, binary, offset = match.groups()
arch = ""
# Arch can be embedded in the filename, e.g.: "libabc.dylib:x86_64h"
colon_pos = binary.rfind(":")
if colon_pos != -1:
maybe_arch = binary[colon_pos+1:]
if is_valid_arch(maybe_arch):
arch = maybe_arch
binary = binary[0:colon_pos]
if arch == "":
arch = guess_arch(addr)
if frameno_str == '0':
# Assume that frame #0 is the first frame of new stack trace.
self.frame_no = 0
original_binary = binary
if self.binary_name_filter:
binary = self.binary_name_filter(binary)
symbolized_line = self.symbolize_address(addr, binary, offset, arch)
if not symbolized_line:
if original_binary != binary:
symbolized_line = self.symbolize_address(addr, binary, offset, arch)
return self.get_symbolized_lines(symbolized_line)
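# Rough end-to-end example for process_line_posix (hypothetical addresses and names):
#   input : "    #0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45)"
#   output: "  #0 0x7f6e35cf2e45 in LogicError() /src/foo.cc:42:7"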
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='ASan symbolization script',
epilog='Example of use:\n'
'asan_symbolize.py -c "$HOME/opt/cross/bin/arm-linux-gnueabi-" '
'-s "$HOME/SymbolFiles" < asan.log')
parser.add_argument('path_to_cut', nargs='*',
help='pattern to be cut from the result file path ')
parser.add_argument('-d','--demangle', action='store_true',
help='demangle function names')
parser.add_argument('-s', metavar='SYSROOT',
help='set path to sysroot for sanitized binaries')
parser.add_argument('-c', metavar='CROSS_COMPILE',
help='set prefix for binutils')
parser.add_argument('-l','--logfile', default=sys.stdin,
type=argparse.FileType('r'),
help='set log file name to parse, default is stdin')
parser.add_argument('--force-system-symbolizer', action='store_true',
help='don\'t use llvm-symbolizer')
args = parser.parse_args()
if args.path_to_cut:
fix_filename_patterns = args.path_to_cut
if args.demangle:
demangle = True
if args.s:
binary_name_filter = sysroot_path_filter
sysroot_path = args.s
if args.c:
binutils_prefix = args.c
if args.logfile:
logfile = args.logfile
else:
logfile = sys.stdin
if args.force_system_symbolizer:
force_system_symbolizer = True
if force_system_symbolizer:
assert(allow_system_symbolizer)
loop = SymbolizationLoop(binary_name_filter)
loop.process_logfile()
|
the-stack_0_15008 | # Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
# Adapted from https://mypyc.readthedocs.io/en/latest/getting_started.html#example-program
import time
def fib(n: int) -> int:
if n <= 1:
return n
else:
return fib(n - 2) + fib(n - 1)
t0 = time.time()
fib(32)
if "__file__" in locals():
print("interpreted")
else:
print("compiled")
print(time.time() - t0)
|
the-stack_0_15009 | #!/usr/bin/python3
from pathlib import Path
import pytest
priv_key = "0x416b8a7d9290502f5661da81f0cf43893e3d19cb9aea3c426cfb36e8186e9c09"
addr = "0x14b0Ed2a7C4cC60DD8F676AE44D0831d3c9b2a9E"
@pytest.fixture(autouse=True)
def no_pass(monkeypatch):
monkeypatch.setattr("brownie.network.account.getpass", lambda x: "")
def test_save(accounts, tmpdir):
a = accounts.add(priv_key)
a.save(tmpdir + "/temp.json")
assert Path(tmpdir + "/temp.json").exists()
accounts._reset()
def test_save_nopath(accounts, tmpdir):
a = accounts.add(priv_key)
path = Path(a.save("temp", True))
assert path.exists()
path.unlink()
Path(a.save("temp"))
assert path.exists()
path.unlink()
accounts._reset()
def test_save_overwrite(accounts, tmpdir):
a = accounts.add(priv_key)
a.save(tmpdir + "/temp.json")
with pytest.raises(FileExistsError):
a.save(tmpdir + "/temp.json")
a.save(tmpdir + "/temp.json", True)
accounts._reset()
def test_load(accounts, tmpdir):
a = accounts.add(priv_key)
a.save(tmpdir + "/temp.json")
accounts._reset()
assert a not in accounts
a = accounts.load(tmpdir + "/temp.json")
assert a.address == addr
def test_load_nopath(accounts, tmpdir):
a = accounts.add(priv_key)
path = a.save("temp")
accounts._reset()
a = accounts.load("temp")
assert a.address == addr
Path(path).unlink()
def test_load_not_exists(accounts, tmpdir):
with pytest.raises(FileNotFoundError):
accounts.load(tmpdir + "/temp.json")
with pytest.raises(FileNotFoundError):
accounts.load("temp")
|
the-stack_0_15011 | """
data collector
"""
from random import randint
from time import sleep
import csv
import re
import concurrent.futures
import datetime
from bs4 import BeautifulSoup
import urllib3
import requests
from utils.utilities import ProjectCommon
OUTPUT_FILE_PATH = 'reviews_with_ranks.csv'
SCRAPER_FINAL_OUTPUT = []
MOVIE_REVIEW_URLS = []
class Anonymize:
"""
anonymize class
"""
def __init__(self):
self.headers = [{'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5)'
' AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/50.0.2661.102 Safari/537.36'},
{'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; '
'rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 '
'(.NET CLR 3.5.30729)'},
{'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
'MyAppName/1.0.0 ([email protected])'}]
@staticmethod
def sleeper():
"""
sleeper method used to sleep between requests
:return:
"""
sleep(randint(2, 5))
def randomize_request_headers(self):
"""
method to randomize request headers for each request
:return:
"""
return self.headers[randint(0, len(self.headers) - 1)]
def movie_review_url_collector():
"""
function collecting urls with the movie reviews
:return:0
"""
start_page_urls = ['https://www.csfd.cz/zebricky/nejhorsi-filmy/?show=complete',
'https://www.csfd.cz/zebricky/nejlepsi-filmy/?show=complete']
anonymize = Anonymize()
for start_page in start_page_urls:
page = requests.get(start_page, headers=anonymize.randomize_request_headers())
soup = BeautifulSoup(page.content, 'html.parser')
movie_review_url = soup.find_all('td', attrs={'class': 'film'})
for url_item in movie_review_url[:300]:
children = url_item.findChildren("a", recursive=False)
movie_name = str(children).split("/")[2]
for random_index in ([2, 3, 4, 5, 6, 7]):
review_page = str(random_index)
MOVIE_REVIEW_URLS.append('https://www.csfd.cz/film/{}/komentare/strana-{}'.
format(movie_name, review_page))
return 0
def movie_review_scraper(url_to_scrape):
"""
function getting the url from the argument, requesting the raw html
and scraping the movie review html code
:param url_to_scrape: url
:return:None
"""
anonymize = Anonymize()
print(f'{datetime.datetime.now()} started scraping {url_to_scrape}')
try:
anonymize.sleeper()
        page = requests.get(url_to_scrape, headers=anonymize.randomize_request_headers())
if page.status_code == 200:
# the <li> html tag structure we're scraping in loops:
#
# variation #1 with star count as rank in the img alt text tag:
# <li id = "comment-796722" >
# <div class ="info" >
# <a href = "" > all reviewer's reviews </a>/
# <a href = "" > <img src = "" class ="" ></a>
# </div>
# <h5 class = "author" > <a href="" > reviewers nickname </a></h5>
# <img src = "" class ="rating" width="32" alt="****" / >
# <p class ="post" > movie review
# <span class ="date desc" > date of review </span></p>
# </li>
#
# variation #2 with 1 word ranking ("odpad!" translates to "junk") in the strong tag:
# <li id = "comment-9092651" >
# <div class ="info" >
# <a href = "" > all reviewer's reviews </a>/
# <a href = "" > <img src = "" class ="" ></a>
# </div>
# <h5 class ="author" > <a href="" > reviewers nickname </a></h5>
# <strong class ="rating" > odpad! </strong>
# <p class ="post" > movie review
# <span class ="date desc" > date of review </span></p>
# </li>
soup = BeautifulSoup(page.content, 'html.parser')
_l_substring_to_trim_from = '<p class="post">'
_r_substring_to_trim_to = '<span class="date desc">'
for soup_item in soup.find_all("li", {"id": re.compile(r"comment-*")}):
scraper_temp_output = []
img = soup_item.findChildren("img",
attrs={'class': 'rating'})
strong = soup_item.findChildren(["strong", "p"],
attrs={'class': ['rating', 'post']})
if strong and str(strong).startswith('[<strong class="rating">odpad!</strong>'):
_r_trim = len(str(strong)) - str(strong).rfind(_r_substring_to_trim_to)
_l_trim = str(strong).rfind(_l_substring_to_trim_from) + len(_l_substring_to_trim_from)
scraper_temp_output.append({'rank': -2,
'review': str(strong)[_l_trim:-_r_trim]})
else:
_r_trim = len(str(img)) - str(img).rfind(_r_substring_to_trim_to)
_l_trim = str(img).rfind(_l_substring_to_trim_from) + len(_l_substring_to_trim_from)
if img and str(img).startswith('[<img alt="*"'):
scraper_temp_output.append({'rank': -2,
'review': str(img)[_l_trim:-_r_trim]})
elif img and str(img).startswith('[<img alt="**"'):
scraper_temp_output.append({'rank': -1,
'review': str(img)[_l_trim:-_r_trim]})
elif img and str(img).startswith('[<img alt="***"'):
scraper_temp_output.append({'rank': 1,
'review': str(img)[_l_trim:-_r_trim]})
elif img and str(img).startswith('[<img alt="****"'):
scraper_temp_output.append({'rank': 2,
'review': str(img)[_l_trim:-_r_trim]})
elif img and str(img).startswith('[<img alt="*****"'):
scraper_temp_output.append({'rank': 2,
'review': str(img)[_l_trim:-_r_trim]})
for item in scraper_temp_output:
raw_review = item.get('review')
review = ProjectCommon.remove_html(str(raw_review).lower())
rank = item.get('rank')
SCRAPER_FINAL_OUTPUT.append((review, rank))
            print(f'{datetime.datetime.now()} finished scraping {url_to_scrape}')
else:
            print(f'{datetime.datetime.now()} Invalid request status code '
                  f'{str(page.status_code)} for {url_to_scrape}')
except urllib3.exceptions.ConnectionError as connerr:
print(str(connerr))
except Exception as exc:
print(str(exc))
if __name__ == "__main__":
# fill the list with urls used for movie data scraping
movie_review_url_collector()
# process the list items in a multi-threaded pool based
# scraper function movie_review_scraper
with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
FUTURE_TO_URL = {executor.submit(movie_review_scraper, url):
url for url in MOVIE_REVIEW_URLS}
for future in concurrent.futures.as_completed(FUTURE_TO_URL):
url = FUTURE_TO_URL[future]
try:
data = future.result()
except Exception as exc:
print('%r generated an exception: %s' % (url, exc))
# write to OUTPUT_FILE_PATH csv file the scraped movie review data
with open(OUTPUT_FILE_PATH, 'w', encoding='utf8', newline='\n') as fw:
writer = csv.writer(fw, escapechar='/', quoting=csv.QUOTE_NONNUMERIC)
writer.writerows(SCRAPER_FINAL_OUTPUT)
print("Movie review data collection phase complete.")
|
the-stack_0_15012 | import os
import numpy as np
from scipy.io import netcdf_file
from pychemia.utils.periodic import atomic_symbol
from .htmlparser import MyHTMLParser
"""
This module provides general routines used by abipython
but not requiring the abipython classes
"""
__author__ = "Guillermo Avendano-Franco"
__copyright__ = "Copyright 2016"
__version__ = "1.1"
__maintainer__ = "Guillermo Avendano-Franco"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "May 13, 2016"
def netcdf2dict(filename):
"""
Read a NetCDF file and create a python dictionary with
numpy arrays for variables
Args:
filename:
NetCDF filename
"""
if not os.path.isfile(filename):
print('ERROR: No such file: ', filename)
return None
output = {}
netcdffile = netcdf_file(filename, 'r', mmap=False)
for ii in netcdffile.variables.keys():
output[ii] = netcdffile.variables[ii][:]
netcdffile.close()
return output
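# Example (hypothetical filename): read an ABINIT NetCDF output into numpy arrays.
#   data = netcdf2dict('abinit-o_OUT.nc')
#   data['some_variable']  # -> numpy array stored under that NetCDF variable name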
def psp_name(atomicnumber, exchange, kind):
"""
Return the filename of a certain PSP given the
atomic number, exchange ('LDA' or 'GGA')
and kind ('FHI','TM')
:param atomicnumber: (int) Atomic number
:param exchange: (str) 'LDA' or 'GGA'
:param kind: (str) Source of Pseudopotentials
:return:
"""
atom_symbol = str(atomic_symbol(atomicnumber))
if kind == 'FHI' and exchange == 'LDA':
filename = str(atomicnumber).zfill(2) + '-' + atom_symbol + '.LDA.fhi'
elif kind == 'FHI' and exchange == 'GGA':
filename = str(atomicnumber).zfill(2) + '-' + atom_symbol + '.GGA.fhi'
elif kind == 'CORE' and exchange == 'LDA':
filename = str(atomicnumber) + atom_symbol.lower() + '.1s_psp.mod'
elif kind == 'GTH' and exchange == 'LDA':
filename = str(atomicnumber).zfill(2) + atom_symbol.lower() + '.pspgth'
elif kind == 'TM' and exchange == 'LDA':
filename = str(atomicnumber) + atom_symbol.lower() + '.pspnc'
elif kind == 'AE' and exchange == 'DEN':
filename = '0.' + str(atomicnumber).zfill(2) + '-' + atom_symbol + '.8.density.AE'
elif kind == 'FC' and exchange == 'DEN':
filename = str(atomicnumber).zfill(2) + '-' + atom_symbol + '.8.fc'
elif kind == 'PAW' and exchange == 'GGA':
filename = 'JTH-PBE-atomicdata-0.2/' + atom_symbol + '.GGA_PBE-JTH.xml'
elif kind == 'PAW' and exchange == 'LDA':
filename = 'JTH-LDA-atomicdata-0.2/' + atom_symbol + '.LDA_PW-JTH.xml'
elif kind == 'HGH' and exchange == 'GGA':
filename = str(atomicnumber).zfill(2) + atom_symbol.lower() + '.pbe_hgh'
elif kind == 'ONC' and exchange == 'PBE':
filename = 'pbe_s_sr' + os.sep + atom_symbol + '.psp8'
else:
print('The combination of exchange=%s and kind=%s is not known' % (exchange, kind))
filename = ''
return filename
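# Example outputs (assuming pychemia maps atomic number 14 to 'Si'):
#   psp_name(14, 'LDA', 'FHI') -> '14-Si.LDA.fhi'
#   psp_name(14, 'GGA', 'PAW') -> 'JTH-PBE-atomicdata-0.2/Si.GGA_PBE-JTH.xml'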
def split_varname(varname):
if varname[-2:].isdigit():
prefix = varname[:-2]
suffix = varname[-2:]
elif varname[-2].isdigit() and varname[-1] == '?':
prefix = varname[:-2]
suffix = varname[-2:]
elif varname[-1].isdigit() and varname[-2] == '?':
prefix = varname[:-2]
suffix = varname[-2:]
elif varname[-1].isdigit():
prefix = varname[:-1]
suffix = varname[-1:]
elif varname[-1] == '?':
prefix = varname[:-1]
suffix = varname[-1:]
else:
prefix = varname
suffix = ''
return prefix, suffix
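# Examples of splitting dataset-indexed ABINIT variable names:
#   split_varname('acell')   -> ('acell', '')
#   split_varname('acell1')  -> ('acell', '1')
#   split_varname('typat12') -> ('typat', '12')
#   split_varname('xred?')   -> ('xred', '?')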
def plot_simple(variables, varname):
from matplotlib.pylab import subplots
from numpy import arange, mean, apply_along_axis, linalg
from math import sqrt
fig, ax = subplots(nrows=1, ncols=1)
fig.set_size_inches(15, 4)
ndtset = variables['ndtset'][0]
lens = np.array([len(variables[varname + str(x + 1)]) for x in range(ndtset)])
x = arange(ndtset) + 1
if max(lens) == min(lens):
if lens[0] == 1:
y = np.array([variables['etotal' + str(x + 1)][0] for x in range(ndtset)])
elif lens[0] % 3 == 0:
y = np.array([mean(apply_along_axis(linalg.norm, 1, variables['fcart' + str(x + 1)].reshape((-1, 3))))
for x in range(ndtset)])
else:
y = np.array([sqrt(sum(variables['fcart' + str(x + 1)] ** 2)) for x in range(ndtset)])
ax.plot(x, y, 'ro')
ax.plot(x, y, 'b-')
ax.set_xlabel('Dataset')
ax.set_ylabel(varname)
ax.set_xlim(1, ndtset + 1)
if ndtset < 30:
ax.set_xticks(arange(ndtset + 1))
ax.grid(which='major', axis='both')
def abihelp(varname):
import json
hp = MyHTMLParser()
import pychemia.code.abinit as _pdca
rf = open(_pdca.__path__[0] + '/ABINIT_variables.json', 'r')
variables = json.load(rf)
rf.close()
if varname not in variables.keys():
print('ERROR: ' + varname + ' is not in the list of variables of ABINIT')
return
else:
abivar = variables[varname]
print(varname)
print('')
print('DEFINITION:', abivar['definition'])
print('SECTION: ', abivar['section'])
print('DEFAULT: ', hp.feed(abivar['default']))
print('')
print(hp.feed(abivar['text']))
print('')
|
the-stack_0_15016 | '''Michael Lange <klappnase (at) freakmail (dot) de>
The ToolTip class provides a flexible tooltip widget for Tkinter; it is based on IDLE's ToolTip
module which unfortunately seems to be broken (at least the version I saw).
INITIALIZATION OPTIONS:
anchor :        where the text should be positioned inside the widget, must be one of "n", "s", "e", "w", "nw" and so on;
default is "center"
bd : borderwidth of the widget; default is 1 (NOTE: don't use "borderwidth" here)
bg : background color to use for the widget; default is "lightyellow" (NOTE: don't use "background")
delay : time in ms that it takes for the widget to appear on the screen when the mouse pointer has
entered the parent widget; default is 1500
fg : foreground (i.e. text) color to use; default is "black" (NOTE: don't use "foreground")
follow_mouse : if set to 1 the tooltip will follow the mouse pointer instead of being displayed
outside of the parent widget; this may be useful if you want to use tooltips for
large widgets like listboxes or canvases; default is 0
font : font to use for the widget; default is system specific
justify : how multiple lines of text will be aligned, must be "left", "right" or "center"; default is "left"
padx : extra space added to the left and right within the widget; default is 4
pady : extra space above and below the text; default is 2
relief : one of "flat", "ridge", "groove", "raised", "sunken" or "solid"; default is "solid"
state : must be "normal" or "disabled"; if set to "disabled" the tooltip will not appear; default is "normal"
text : the text that is displayed inside the widget
textvariable : if set to an instance of Tkinter.StringVar() the variable's value will be used as text for the widget
width : width of the widget; the default is 0, which means that "wraplength" will be used to limit the widgets width
wraplength : limits the number of characters in each line; default is 150
WIDGET METHODS:
configure(**opts) : change one or more of the widget's options as described above; the changes will take effect the
next time the tooltip shows up; NOTE: follow_mouse cannot be changed after widget initialization
Other widget methods that might be useful if you want to subclass ToolTip:
enter() : callback when the mouse pointer enters the parent widget
leave() : called when the mouse pointer leaves the parent widget
motion() : is called when the mouse pointer moves inside the parent widget if follow_mouse is set to 1 and the
tooltip has shown up to continually update the coordinates of the tooltip window
coords() : calculates the screen coordinates of the tooltip window
create_contents() : creates the contents of the tooltip window (by default a Tkinter.Label)
'''
# Ideas gleaned from PySol
try:
import Tkinter as tkinter
except ImportError:
import tkinter
class ToolTip:
def __init__(self, master, text='Your text here', delay=1500, **opts):
self.master = master
self._opts = {'anchor':'center', 'bd':1, 'bg':'lightyellow', 'delay':delay, 'fg':'black',\
'follow_mouse':0, 'font':None, 'justify':'left', 'padx':4, 'pady':2,\
'relief':'solid', 'state':'normal', 'text':text, 'textvariable':None,\
'width':0, 'wraplength':150}
self.configure(**opts)
self._tipwindow = None
self._id = None
self._id1 = self.master.bind("<Enter>", self.enter, '+')
self._id2 = self.master.bind("<Leave>", self.leave, '+')
self._id3 = self.master.bind("<ButtonPress>", self.leave, '+')
self._follow_mouse = 0
if self._opts['follow_mouse']:
self._id4 = self.master.bind("<Motion>", self.motion, '+')
self._follow_mouse = 1
def configure(self, **opts):
for key in opts:
if key in self._opts:
self._opts[key] = opts[key]
else:
                raise KeyError('Unknown option: "%s"' % key)
##----these methods handle the callbacks on "<Enter>", "<Leave>" and "<Motion>"---------------##
##----events on the parent widget; override them if you want to change the widget's behavior--##
def enter(self, event=None):
self._schedule()
def leave(self, event=None):
self._unschedule()
self._hide()
def motion(self, event=None):
if self._tipwindow and self._follow_mouse:
x, y = self.coords()
self._tipwindow.wm_geometry("+%d+%d" % (x, y))
##------the methods that do the work:---------------------------------------------------------##
def _schedule(self):
self._unschedule()
if self._opts['state'] == 'disabled':
return
self._id = self.master.after(self._opts['delay'], self._show)
def _unschedule(self):
id = self._id
self._id = None
if id:
self.master.after_cancel(id)
def _show(self):
if self._opts['state'] == 'disabled':
self._unschedule()
return
if not self._tipwindow:
self._tipwindow = tw = tkinter.Toplevel(self.master)
# hide the window until we know the geometry
tw.withdraw()
tw.wm_overrideredirect(1)
if tw.tk.call("tk", "windowingsystem") == 'aqua':
tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w, "help", "none")
self.create_contents()
tw.update_idletasks()
x, y = self.coords()
tw.wm_geometry("+%d+%d" % (x, y))
tw.deiconify()
def _hide(self):
tw = self._tipwindow
self._tipwindow = None
if tw:
tw.destroy()
##----these methods might be overridden in derived classes:----------------------------------##
def coords(self):
# The tip window must be completely outside the master widget;
# otherwise when the mouse enters the tip window we get
# a leave event and it disappears, and then we get an enter
# event and it reappears, and so on forever :-(
# or we take care that the mouse pointer is always outside the tipwindow :-)
tw = self._tipwindow
twx, twy = tw.winfo_reqwidth(), tw.winfo_reqheight()
w, h = tw.winfo_screenwidth(), tw.winfo_screenheight()
# calculate the y coordinate:
if self._follow_mouse:
y = tw.winfo_pointery() + 20
# make sure the tipwindow is never outside the screen:
if y + twy > h:
y = y - twy - 30
else:
y = self.master.winfo_rooty() + self.master.winfo_height() + 3
if y + twy > h:
y = self.master.winfo_rooty() - twy - 3
# we can use the same x coord in both cases:
x = tw.winfo_pointerx() - twx / 2
if x < 0:
x = 0
elif x + twx > w:
x = w - twx
return x, y
def create_contents(self):
opts = self._opts.copy()
for opt in ('delay', 'follow_mouse', 'state'):
del opts[opt]
label = tkinter.Label(self._tipwindow, **opts)
label.pack()
##---------demo code-----------------------------------##
def demo():
root = tkinter.Tk(className='ToolTip-demo')
l = tkinter.Listbox(root)
l.insert('end', "I'm a listbox")
l.pack(side='top')
t1 = ToolTip(l, follow_mouse=1, text="I'm a tooltip with follow_mouse set to 1, so I won't be placed outside my parent")
b = tkinter.Button(root, text='Quit', command=root.quit)
b.pack(side='bottom')
t2 = ToolTip(b, text='Enough of this')
root.mainloop()
if __name__ == '__main__':
demo()
|
the-stack_0_15017 | import json
from collections import Counter
class Sorter(object):
def __init__(self):
# These are playlists that are to be edited, this can be changed
        from configparser import ConfigParser
        config = ConfigParser()
        config.read('config.ini')
        self.target_playlists = config.get('Settings', 'TargetPlaylists')
self.target_playlists = self.target_playlists.split(',')
self.target_playlists = [x.strip() for x in self.target_playlists]
def findtargetPlaylists(self, playlist_json, length_check=False):
found_playlists = {}
if(isinstance(playlist_json, (str, bytes))): playlist_json = json.loads(playlist_json)
playlist_names = self.findPlaylists(playlist_json)
for i, playlist in enumerate(playlist_names):
if playlist in self.target_playlists: found_playlists[playlist] = playlist_json['items'][i]['id']
if(length_check):
if len(playlist_json['items']) == 20: extension_state=True
else: extension_state=False
return(found_playlists, extension_state)
return(found_playlists)
def findPlaylists(self, playlist_json):
found_playlists = []
if(isinstance(playlist_json, (str, bytes))): playlist_json = json.loads(playlist_json)
for item in playlist_json['items']: found_playlists.append(f"{item['name']}")
return(found_playlists)
def findSongs(self, recent_json):
recent_songs = []
if(isinstance(recent_json, (str, bytes))): recent_json = json.loads(recent_json)
for item in recent_json['items']: recent_songs.append(f"{item['track']['name']}:{item['track']['id']}")
return(recent_songs)
def findtopSongs(self, songs_list):
listened_songs = []
for vector in songs_list: listened_songs.append(vector)
song_counter = Counter(listened_songs)
top_songs = sorted(song_counter, key=lambda x: -song_counter[x])
songs_counted = song_counter.most_common()
return(songs_counted)
def findtimeAdded(self, playlist_json):
if(isinstance(playlist_json, (str, bytes))): playlist_json = json.loads(playlist_json)
elif(isinstance(playlist_json, dict)): pass
else: return
timestamp_list = [item['added_at'] for item in playlist_json['items']]
return(timestamp_list)
def compare_recent(self, recent_json, new_json):
new_songs = self.findSongs(new_json)
recent_songs = self.findSongs(recent_json)
return not(new_songs == recent_songs)
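# Illustrative shape of the JSON payloads this class expects, inferred from the
# accessors above (field values are hypothetical):
#   playlist_json = {"items": [{"name": "My Playlist", "id": "playlist123"}]}
#   recent_json   = {"items": [{"added_at": "2020-01-01T00:00:00Z",
#                               "track": {"name": "Song Title", "id": "track456"}}]}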
|
the-stack_0_15018 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -alertnotify, -blocknotify and -walletnotify options."""
import os
from test_framework.test_framework import JdCoinTestFramework
from test_framework.util import (
assert_equal,
wait_until,
connect_nodes,
)
class NotificationsTest(JdCoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def setup_network(self):
self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
self.block_filename = os.path.join(self.options.tmpdir, "blocks.txt")
self.tx_filename = os.path.join(self.options.tmpdir, "transactions.txt")
# -alertnotify and -blocknotify on node0, walletnotify on node1
self.extra_args = [["-blockversion=4",
"-alertnotify=echo %%s >> %s" % self.alert_filename,
"-blocknotify=echo %%s >> %s" % self.block_filename],
["-blockversion=211",
"-rescan",
"-walletnotify=echo %%s >> %s" % self.tx_filename]]
super().setup_network()
def run_test(self):
self.log.info("test -blocknotify")
block_count = 10
blocks = self.nodes[1].generate(block_count)
# wait at most 10 seconds for expected file size before reading the content
wait_until(lambda: os.path.isfile(self.block_filename) and os.stat(self.block_filename).st_size >= (block_count * 65), timeout=10)
# file content should equal the generated blocks hashes
with open(self.block_filename, 'r') as f:
assert_equal(sorted(blocks), sorted(f.read().splitlines()))
self.log.info("test -walletnotify")
# wait at most 10 seconds for expected file size before reading the content
wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10)
# file content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
with open(self.tx_filename, 'r') as f:
assert_equal(sorted(txids_rpc), sorted(f.read().splitlines()))
os.remove(self.tx_filename)
self.log.info("test -walletnotify after rescan")
# restart node to rescan to force wallet notifications
self.restart_node(1)
connect_nodes(self.nodes[0], 1)
wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10)
# file content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
with open(self.tx_filename, 'r') as f:
assert_equal(sorted(txids_rpc), sorted(f.read().splitlines()))
# Mine another 41 up-version blocks. -alertnotify should trigger on the 51st.
self.log.info("test -alertnotify")
self.nodes[1].generate(51)
self.sync_all()
# Give jdcoind 10 seconds to write the alert notification
wait_until(lambda: os.path.isfile(self.alert_filename) and os.path.getsize(self.alert_filename), timeout=10)
with open(self.alert_filename, 'r', encoding='utf8') as f:
alert_text = f.read()
# Mine more up-version blocks, should not get more alerts:
self.nodes[1].generate(2)
self.sync_all()
with open(self.alert_filename, 'r', encoding='utf8') as f:
alert_text2 = f.read()
self.log.info("-alertnotify should not continue notifying for more unknown version blocks")
assert_equal(alert_text, alert_text2)
if __name__ == '__main__':
NotificationsTest().main()
|
the-stack_0_15019 | import os
def rename_files():
# (1) get file names from a folder
file_list = os.listdir("prank")
print(file_list)
saved_path = os.getcwd()
print("Current Working Directory is %s" % saved_path)
os.chdir("prank")
# (2) for each file, rename filename
for file_name in file_list:
        os.rename(file_name, file_name.translate(str.maketrans("", "", "0123456789")))
os.chdir(saved_path)
rename_files()
# os.rename("test", "test3")
# os.rename("test1", "test2")
|
the-stack_0_15020 | from pymongo import MongoClient
client = MongoClient("mongodb://127.0.0.1:27017/")
database = "mydb"
collections = ["cars", "cops"]
database = client[database]
cars = database[collections[0]]
cops = database[collections[1]]
def get_all_cars():
return cars.find()
def get_free_cars():
return cars.find({"resolved": 0, "assigned": 0})
def put_car(car):
car['cop_id'] = ""
car['resolved'] = 0
car['assigned'] = 0
car_id = cars.insert_one(car).inserted_id
return car_id
def get_all_cops():
return cops.find()
def get_available_cops():
return cops.find({"available": 1})
def put_cop(cop):
cop['available'] = 1
cop['car_id'] = ""
cop_id = cops.insert_one(cop).inserted_id
return cop_id
def assign_cop_car(cop_id, car_id):
if list(cops.find_one({"_id": cop_id}, {"_id": 0, "available": 1}).values()) != [1]: return "failed"
if list(cars.find_one({"_id": car_id}, {"_id": 0, "resolved": 1, "assigned": 1}).values()) != [0, 0]: return "failed"
cops.update_one({"_id": cop_id}, {"$set": {"available": 0, "car_id": car_id}})
cars.update_one({"_id": car_id}, {"$set": {"assigned": 1, "cop_id": cop_id}})
return "assigned"
def complete_assignment(cop_id, car_id):
if list(cops.find_one({"_id": cop_id}, {"_id": 0, "available": 1}).values()) != [0]: return "failed"
if list(cars.find_one({"_id": car_id}, {"_id": 0, "resolved": 1, "assigned": 1}).values()) != [0, 1]: return "failed"
cops.update_one({"_id": cop_id}, {"$set": {"available": 1}})
cops.update_one({"_id": cop_id}, {"$set": {"car_id": ""}})
cars.update_one({"_id": car_id}, {"$set": {"assigned": 0}})
cars.update_one({"_id": car_id}, {"$set": {"resolved": 1}})
cars.update_one({"_id": car_id}, {"$set": {"cop_id": ""}})
return "resolved" |
the-stack_0_15021 | """Utilities related archives.
"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import logging
import os
import shutil
import stat
import tarfile
import zipfile
from pip._internal.exceptions import InstallationError
from pip._internal.utils.filetypes import (
BZ2_EXTENSIONS,
TAR_EXTENSIONS,
XZ_EXTENSIONS,
ZIP_EXTENSIONS,
)
from pip._internal.utils.misc import ensure_dir
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Iterable, List, Optional, Text, Union
logger = logging.getLogger(__name__)
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
except ImportError:
logger.debug("bz2 module is not available")
try:
# Only for Python 3.3+
import lzma # noqa
SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
except ImportError:
logger.debug("lzma module is not available")
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
def split_leading_dir(path):
# type: (Union[str, Text]) -> List[Union[str, Text]]
path = path.lstrip("/").lstrip("\\")
if "/" in path and (
("\\" in path and path.find("/") < path.find("\\")) or "\\" not in path
):
return path.split("/", 1)
elif "\\" in path:
return path.split("\\", 1)
else:
return [path, ""]
def has_leading_dir(paths):
# type: (Iterable[Union[str, Text]]) -> bool
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
def is_within_directory(directory, target):
# type: ((Union[str, Text]), (Union[str, Text])) -> bool
"""
Return true if the absolute path of target is within the directory
"""
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
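# Examples (POSIX paths, illustrative) of the path-traversal guard above:
#   is_within_directory('/tmp/unpack', '/tmp/unpack/pkg/mod.py') -> True
#   is_within_directory('/tmp/unpack', '/tmp/unpack/../evil.py') -> False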
def unzip_file(filename, location, flatten=True):
# type: (str, str, bool) -> None
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
zipfp = open(filename, "rb")
try:
zip = zipfile.ZipFile(zipfp, allowZip64=True)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if not is_within_directory(location, fn):
message = (
"The zip file ({}) has a file ({}) trying to install "
"outside target directory ({})"
)
raise InstallationError(message.format(filename, fn, location))
if fn.endswith("/") or fn.endswith("\\"):
# A directory
ensure_dir(fn)
else:
ensure_dir(dir)
# Don't use read() to avoid allocating an arbitrarily large
# chunk of memory for the file's content
fp = zip.open(name)
try:
with open(fn, "wb") as destfp:
shutil.copyfileobj(fp, destfp)
finally:
fp.close()
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for
# user/group/world?
if mode and stat.S_ISREG(mode) and mode & 0o111:
# make dest file have execute for user/group/world
# (chmod +x) no-op on windows per python docs
os.chmod(fn, (0o777 - current_umask() | 0o111))
finally:
zipfp.close()
def untar_file(filename, location):
# type: (str, str) -> None
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
if filename.lower().endswith(".gz") or filename.lower().endswith(".tgz"):
mode = "r:gz"
elif filename.lower().endswith(BZ2_EXTENSIONS):
mode = "r:bz2"
elif filename.lower().endswith(XZ_EXTENSIONS):
mode = "r:xz"
elif filename.lower().endswith(".tar"):
mode = "r"
else:
logger.warning(
"Cannot determine compression type for file %s", filename,
)
mode = "r:*"
tar = tarfile.open(filename, mode)
try:
leading = has_leading_dir([member.name for member in tar.getmembers()])
for member in tar.getmembers():
fn = member.name
if leading:
# https://github.com/python/mypy/issues/1174
fn = split_leading_dir(fn)[1] # type: ignore
path = os.path.join(location, fn)
if not is_within_directory(location, path):
message = (
"The tar file ({}) has a file ({}) trying to install "
"outside target directory ({})"
)
raise InstallationError(message.format(filename, path, location))
if member.isdir():
ensure_dir(path)
elif member.issym():
try:
# https://github.com/python/typeshed/issues/2673
tar._extract_member(member, path) # type: ignore
except Exception as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
"In the tar file %s the member %s is invalid: %s",
filename,
member.name,
exc,
)
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
"In the tar file %s the member %s is invalid: %s",
filename,
member.name,
exc,
)
continue
ensure_dir(os.path.dirname(path))
with open(path, "wb") as destfp:
shutil.copyfileobj(fp, destfp)
fp.close()
# Update the timestamp (useful for cython compiled files)
# https://github.com/python/typeshed/issues/2673
tar.utime(member, path) # type: ignore
# member have any execute permissions for user/group/world?
if member.mode & 0o111:
# make dest file have execute for user/group/world
# no-op on windows per python docs
os.chmod(path, (0o777 - current_umask() | 0o111))
finally:
tar.close()
def unpack_file(
filename, # type: str
location, # type: str
content_type=None, # type: Optional[str]
):
# type: (...) -> None
filename = os.path.realpath(filename)
if (
content_type == "application/zip"
or filename.lower().endswith(ZIP_EXTENSIONS)
or zipfile.is_zipfile(filename)
):
unzip_file(filename, location, flatten=not filename.endswith(".whl"))
elif (
content_type == "application/x-gzip"
or tarfile.is_tarfile(filename)
or filename.lower().endswith(TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)
):
untar_file(filename, location)
else:
# FIXME: handle?
# FIXME: magic signatures?
logger.critical(
"Cannot unpack file %s (downloaded from %s, content-type: %s); "
"cannot detect archive format",
filename,
location,
content_type,
)
raise InstallationError(
"Cannot determine archive format of {}".format(location)
)
|
the-stack_0_15022 | # -*- coding: utf-8 -*-
""" Deeplabv3+ model for Keras.
This model is based on TF repo:
https://github.com/tensorflow/models/tree/master/research/deeplab
On Pascal VOC, original model gets to 84.56% mIOU
Now this model is only available for the TensorFlow backend,
due to its reliance on `SeparableConvolution` layers, but Theano will add
this layer soon.
MobileNetv2 backbone is based on this repo:
https://github.com/JonathanCMitchell/mobilenet_v2_keras
# Reference
- [Encoder-Decoder with Atrous Separable Convolution
for Semantic Image Segmentation](https://arxiv.org/pdf/1802.02611.pdf)
- [Xception: Deep Learning with Depthwise Separable Convolutions]
(https://arxiv.org/abs/1610.02357)
- [Inverted Residuals and Linear Bottlenecks: Mobile Networks for
Classification, Detection and Segmentation](https://arxiv.org/abs/1801.04381)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from keras.models import Model
from keras import layers
from keras.layers import Input
from keras.layers import Activation
from keras.layers import Concatenate
from keras.layers import Add
from keras.layers import Dropout
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import DepthwiseConv2D
from keras.layers import ZeroPadding2D
from keras.layers import AveragePooling2D
from keras.engine import Layer
from keras.engine import InputSpec
from keras.engine.topology import get_source_inputs
from keras import backend as K
from keras.applications import imagenet_utils
from keras.utils import conv_utils
from keras.utils.data_utils import get_file
WEIGHTS_PATH_X = "https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.1/deeplabv3_xception_tf_dim_ordering_tf_kernels.h5"
WEIGHTS_PATH_MOBILE = "https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.1/deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5"
class BilinearUpsampling(Layer):
"""Just a simple bilinear upsampling layer. Works only with TF.
Args:
upsampling: tuple of 2 numbers > 0. The upsampling ratio for h and w
output_size: used instead of upsampling arg if passed!
"""
def __init__(self, upsampling=(2, 2), output_size=None, data_format=None, **kwargs):
super(BilinearUpsampling, self).__init__(**kwargs)
self.data_format = K.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=4)
if output_size:
self.output_size = conv_utils.normalize_tuple(
output_size, 2, 'output_size')
self.upsampling = None
else:
self.output_size = None
self.upsampling = conv_utils.normalize_tuple(
upsampling, 2, 'upsampling')
def compute_output_shape(self, input_shape):
if self.upsampling:
height = self.upsampling[0] * \
input_shape[1] if input_shape[1] is not None else None
width = self.upsampling[1] * \
input_shape[2] if input_shape[2] is not None else None
else:
height = self.output_size[0]
width = self.output_size[1]
return (input_shape[0],
height,
width,
input_shape[3])
def call(self, inputs):
if self.upsampling:
return K.tf.image.resize_bilinear(inputs, (inputs.shape[1] * self.upsampling[0],
inputs.shape[2] * self.upsampling[1]),
align_corners=True)
else:
return K.tf.image.resize_bilinear(inputs, (self.output_size[0],
self.output_size[1]),
align_corners=True)
def get_config(self):
config = {'upsampling': self.upsampling,
'output_size': self.output_size,
'data_format': self.data_format}
base_config = super(BilinearUpsampling, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
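# --- Hedged usage sketch (not part of the original file) ---
# Shows the two ways BilinearUpsampling can be configured; the shapes are
# illustrative only and the layer requires the TensorFlow backend.
def _demo_bilinear_upsampling():
    x = Input(shape=(64, 64, 256))
    y = BilinearUpsampling(upsampling=(4, 4))(x)       # spatial size 256x256
    z = BilinearUpsampling(output_size=(512, 512))(x)  # spatial size 512x512
    return Model(x, [y, z])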
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):
""" SepConv with BN between depthwise & pointwise. Optionally add activation after BN
Implements right "same" padding for even kernel sizes
Args:
x: input tensor
filters: num of filters in pointwise convolution
prefix: prefix before name
stride: stride at depthwise conv
kernel_size: kernel size for depthwise convolution
rate: atrous rate for depthwise convolution
            depth_activation: flag to use activation between depthwise & pointwise convs
epsilon: epsilon to use in BN layer
"""
if stride == 1:
depth_padding = 'same'
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
x = ZeroPadding2D((pad_beg, pad_end))(x)
depth_padding = 'valid'
if not depth_activation:
x = Activation('relu')(x)
x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),
padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x)
x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
if depth_activation:
x = Activation('relu')(x)
x = Conv2D(filters, (1, 1), padding='same',
use_bias=False, name=prefix + '_pointwise')(x)
x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
if depth_activation:
x = Activation('relu')(x)
return x
def _conv2d_same(x, filters, prefix, stride=1, kernel_size=3, rate=1):
"""Implements right 'same' padding for even kernel sizes
Without this there is a 1 pixel drift when stride = 2
Args:
x: input tensor
filters: num of filters in pointwise convolution
prefix: prefix before name
stride: stride at depthwise conv
kernel_size: kernel size for depthwise convolution
rate: atrous rate for depthwise convolution
"""
if stride == 1:
return Conv2D(filters,
(kernel_size, kernel_size),
strides=(stride, stride),
padding='same', use_bias=False,
dilation_rate=(rate, rate),
name=prefix)(x)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
x = ZeroPadding2D((pad_beg, pad_end))(x)
return Conv2D(filters,
(kernel_size, kernel_size),
strides=(stride, stride),
padding='valid', use_bias=False,
dilation_rate=(rate, rate),
name=prefix)(x)
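# --- Hedged illustration (not part of the original file) ---
# The manual padding used above for stride > 1: the atrous rate inflates the
# effective kernel, and the total padding is split (almost) evenly.
def _demo_same_padding(kernel_size=3, rate=2):
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    return pad_beg, pad_end  # (2, 2) for kernel_size=3, rate=2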
def _xception_block(inputs, depth_list, prefix, skip_connection_type, stride,
rate=1, depth_activation=False, return_skip=False):
""" Basic building block of modified Xception network
Args:
inputs: input tensor
depth_list: number of filters in each SepConv layer. len(depth_list) == 3
prefix: prefix before name
skip_connection_type: one of {'conv','sum','none'}
stride: stride at last depthwise conv
rate: atrous rate for depthwise convolution
depth_activation: flag to use activation between depthwise & pointwise convs
return_skip: flag to return additional tensor after 2 SepConvs for decoder
"""
residual = inputs
for i in range(3):
residual = SepConv_BN(residual,
depth_list[i],
prefix + '_separable_conv{}'.format(i + 1),
stride=stride if i == 2 else 1,
rate=rate,
depth_activation=depth_activation)
if i == 1:
skip = residual
if skip_connection_type == 'conv':
shortcut = _conv2d_same(inputs, depth_list[-1], prefix + '_shortcut',
kernel_size=1,
stride=stride)
shortcut = BatchNormalization(name=prefix + '_shortcut_BN')(shortcut)
outputs = layers.add([residual, shortcut])
elif skip_connection_type == 'sum':
outputs = layers.add([residual, inputs])
elif skip_connection_type == 'none':
outputs = residual
if return_skip:
return outputs, skip
else:
return outputs
def relu6(x):
return K.relu(x, max_value=6)
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
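# --- Hedged illustration (not part of the original file) ---
# _make_divisible() rounds channel counts to multiples of 8 and never drops
# more than 10% below the requested value.
def _demo_make_divisible():
    assert _make_divisible(32 * 0.75, 8) == 24
    assert _make_divisible(16 * 0.35, 8) == 8
    assert _make_divisible(91, 8) == 88  # 88 >= 0.9 * 91, so no bump is needed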
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, skip_connection, rate=1):
in_channels = inputs._keras_shape[-1]
pointwise_conv_filters = int(filters * alpha)
pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
x = inputs
prefix = 'expanded_conv_{}_'.format(block_id)
if block_id:
# Expand
x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
use_bias=False, activation=None,
name=prefix + 'expand')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name=prefix + 'expand_BN')(x)
x = Activation(relu6, name=prefix + 'expand_relu')(x)
else:
prefix = 'expanded_conv_'
# Depthwise
x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
use_bias=False, padding='same', dilation_rate=(rate, rate),
name=prefix + 'depthwise')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name=prefix + 'depthwise_BN')(x)
x = Activation(relu6, name=prefix + 'depthwise_relu')(x)
# Project
x = Conv2D(pointwise_filters,
kernel_size=1, padding='same', use_bias=False, activation=None,
name=prefix + 'project')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name=prefix + 'project_BN')(x)
if skip_connection:
return Add(name=prefix + 'add')([inputs, x])
# if in_channels == pointwise_filters and stride == 1:
# return Add(name='res_connect_' + str(block_id))([inputs, x])
return x
def Deeplabv3(weights='pascal_voc', input_tensor=None, input_shape=(512, 512, 3), classes=21, backbone='mobilenetv2', OS=16, alpha=1.):
""" Instantiates the Deeplabv3+ architecture
Optionally loads weights pre-trained
on PASCAL VOC. This model is available for TensorFlow only,
and can only be used with inputs following the TensorFlow
    data format `(height, width, channels)`.
# Arguments
weights: one of 'pascal_voc' (pre-trained on pascal voc)
or None (random initialization)
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: shape of input image. format HxWxC
PASCAL VOC model was trained on (512,512,3) images
classes: number of desired classes. If classes != 21,
last layer is initialized randomly
backbone: backbone to use. one of {'xception','mobilenetv2'}
OS: determines input_shape/feature_extractor_output ratio. One of {8,16}.
Used only for xception backbone.
alpha: controls the width of the MobileNetV2 network. This is known as the
width multiplier in the MobileNetV2 paper.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
Used only for mobilenetv2 backbone
# Returns
A Keras model instance.
# Raises
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
ValueError: in case of invalid argument for `weights` or `backbone`
"""
if not (weights in {'pascal_voc', None}):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `pascal_voc` '
'(pre-trained on PASCAL VOC)')
if K.backend() != 'tensorflow':
raise RuntimeError('The Deeplabv3+ model is only available with '
'the TensorFlow backend.')
if not (backbone in {'xception', 'mobilenetv2'}):
raise ValueError('The `backbone` argument should be either '
'`xception` or `mobilenetv2` ')
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if backbone == 'xception':
if OS == 8:
entry_block3_stride = 1
middle_block_rate = 2 # ! Not mentioned in paper, but required
exit_block_rates = (2, 4)
atrous_rates = (12, 24, 36)
else:
entry_block3_stride = 2
middle_block_rate = 1
exit_block_rates = (1, 2)
atrous_rates = (6, 12, 18)
x = Conv2D(32, (3, 3), strides=(2, 2),
name='entry_flow_conv1_1', use_bias=False, padding='same')(img_input)
x = BatchNormalization(name='entry_flow_conv1_1_BN')(x)
x = Activation('relu')(x)
x = _conv2d_same(x, 64, 'entry_flow_conv1_2', kernel_size=3, stride=1)
x = BatchNormalization(name='entry_flow_conv1_2_BN')(x)
x = Activation('relu')(x)
x = _xception_block(x, [128, 128, 128], 'entry_flow_block1',
skip_connection_type='conv', stride=2,
depth_activation=False)
x, skip1 = _xception_block(x, [256, 256, 256], 'entry_flow_block2',
skip_connection_type='conv', stride=2,
depth_activation=False, return_skip=True)
x = _xception_block(x, [728, 728, 728], 'entry_flow_block3',
skip_connection_type='conv', stride=entry_block3_stride,
depth_activation=False)
for i in range(16):
x = _xception_block(x, [728, 728, 728], 'middle_flow_unit_{}'.format(i + 1),
skip_connection_type='sum', stride=1, rate=middle_block_rate,
depth_activation=False)
x = _xception_block(x, [728, 1024, 1024], 'exit_flow_block1',
skip_connection_type='conv', stride=1, rate=exit_block_rates[0],
depth_activation=False)
x = _xception_block(x, [1536, 1536, 2048], 'exit_flow_block2',
skip_connection_type='none', stride=1, rate=exit_block_rates[1],
depth_activation=True)
else:
OS = 8
first_block_filters = _make_divisible(32 * alpha, 8)
x = Conv2D(first_block_filters,
kernel_size=3,
strides=(2, 2), padding='same',
use_bias=False, name='Conv')(img_input)
x = BatchNormalization(
epsilon=1e-3, momentum=0.999, name='Conv_BN')(x)
x = Activation(relu6, name='Conv_Relu6')(x)
x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,
expansion=1, block_id=0, skip_connection=False)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,
expansion=6, block_id=1, skip_connection=False)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,
expansion=6, block_id=2, skip_connection=True)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,
expansion=6, block_id=3, skip_connection=False)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=4, skip_connection=True)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=5, skip_connection=True)
# stride in block 6 changed from 2 -> 1, so we need to use rate = 2
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, # 1!
expansion=6, block_id=6, skip_connection=False)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=7, skip_connection=True)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=8, skip_connection=True)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=9, skip_connection=True)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=10, skip_connection=False)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=11, skip_connection=True)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=12, skip_connection=True)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=2, # 1!
expansion=6, block_id=13, skip_connection=False)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,
expansion=6, block_id=14, skip_connection=True)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,
expansion=6, block_id=15, skip_connection=True)
x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, rate=4,
expansion=6, block_id=16, skip_connection=False)
# end of feature extractor
# branching for Atrous Spatial Pyramid Pooling
# Image Feature branch
#out_shape = int(np.ceil(input_shape[0] / OS))
b4 = AveragePooling2D(pool_size=(int(np.ceil(input_shape[0] / OS)), int(np.ceil(input_shape[1] / OS))))(x)
b4 = Conv2D(256, (1, 1), padding='same',
use_bias=False, name='image_pooling')(b4)
b4 = BatchNormalization(name='image_pooling_BN', epsilon=1e-5)(b4)
b4 = Activation('relu')(b4)
b4 = BilinearUpsampling((int(np.ceil(input_shape[0] / OS)), int(np.ceil(input_shape[1] / OS))))(b4)
# simple 1x1
b0 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='aspp0')(x)
b0 = BatchNormalization(name='aspp0_BN', epsilon=1e-5)(b0)
b0 = Activation('relu', name='aspp0_activation')(b0)
# there are only 2 branches in mobilenetV2. not sure why
if backbone == 'xception':
# rate = 6 (12)
b1 = SepConv_BN(x, 256, 'aspp1',
rate=atrous_rates[0], depth_activation=True, epsilon=1e-5)
# rate = 12 (24)
b2 = SepConv_BN(x, 256, 'aspp2',
rate=atrous_rates[1], depth_activation=True, epsilon=1e-5)
# rate = 18 (36)
b3 = SepConv_BN(x, 256, 'aspp3',
rate=atrous_rates[2], depth_activation=True, epsilon=1e-5)
# concatenate ASPP branches & project
x = Concatenate()([b4, b0, b1, b2, b3])
else:
x = Concatenate()([b4, b0])
x = Conv2D(256, (1, 1), padding='same',
use_bias=False, name='concat_projection')(x)
x = BatchNormalization(name='concat_projection_BN', epsilon=1e-5)(x)
x = Activation('relu')(x)
x = Dropout(0.1)(x)
# DeepLab v.3+ decoder
if backbone == 'xception':
# Feature projection
# x4 (x2) block
x = BilinearUpsampling(output_size=(int(np.ceil(input_shape[0] / 4)),
int(np.ceil(input_shape[1] / 4))))(x)
dec_skip1 = Conv2D(48, (1, 1), padding='same',
use_bias=False, name='feature_projection0')(skip1)
dec_skip1 = BatchNormalization(
name='feature_projection0_BN', epsilon=1e-5)(dec_skip1)
dec_skip1 = Activation('relu')(dec_skip1)
x = Concatenate()([x, dec_skip1])
x = SepConv_BN(x, 256, 'decoder_conv0',
depth_activation=True, epsilon=1e-5)
x = SepConv_BN(x, 256, 'decoder_conv1',
depth_activation=True, epsilon=1e-5)
    # you can use it with an arbitrary number of classes
if classes == 21:
last_layer_name = 'logits_semantic'
else:
last_layer_name = 'custom_logits_semantic'
x = Conv2D(classes, (1, 1), padding='same', name=last_layer_name)(x)
x = BilinearUpsampling(output_size=(input_shape[0], input_shape[1]))(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
model = Model(inputs, x, name='deeplabv3plus')
# load weights
if weights == 'pascal_voc':
if backbone == 'xception':
weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH_X,
cache_subdir='models')
else:
weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH_MOBILE,
cache_subdir='models')
model.load_weights(weights_path, by_name=True)
return model
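# --- Hedged usage sketch (not part of the original file) ---
# Builds the MobileNetV2 variant, scales a dummy image to [-1, 1] and takes the
# per-pixel argmax over the 21 PASCAL VOC classes. Downloads weights on first use.
def _demo_deeplab_inference():
    model = Deeplabv3(weights='pascal_voc', backbone='mobilenetv2',
                      input_shape=(512, 512, 3), classes=21)
    image = np.random.randint(0, 255, size=(1, 512, 512, 3)).astype(np.float32)
    logits = model.predict(imagenet_utils.preprocess_input(image, mode='tf'))
    return np.argmax(logits, axis=-1)  # per-pixel class labels, shape (1, 512, 512)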
def preprocess_input(x):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
x: a 4D numpy array consists of RGB values within [0, 255].
# Returns
Input array scaled to [-1.,1.]
"""
return imagenet_utils.preprocess_input(x, mode='tf') |
the-stack_0_15025 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class CertificateRegistrationProviderOperations(object):
"""CertificateRegistrationProviderOperations operations.
    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: API Version. Constant value: "2015-08-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2015-08-01"
self.config = config
def list_operations(
            self, custom_headers=None, raw=False, **operation_config):
        """Implements the CSM operations API to expose the list of available
        CSM APIs under the resource provider.
        Implements the CSM operations API to expose the list of available
        CSM APIs under the resource provider.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of CsmOperationDescription
:rtype:
~azure.mgmt.web.models.CsmOperationDescriptionPaged[~azure.mgmt.web.models.CsmOperationDescription]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_operations.metadata['url']
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.CsmOperationDescriptionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_operations.metadata = {'url': '/providers/Microsoft.CertificateRegistration/operations'}
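# --- Hedged usage sketch (not part of the generated client) ---
# The operations group is normally reached through the generated management
# client rather than constructed by hand; the attribute name below is an
# assumption based on the SDK's naming conventions.
def _demo_list_operations(credentials, subscription_id):
    from azure.mgmt.web import WebSiteManagementClient
    client = WebSiteManagementClient(credentials, subscription_id)
    return [op.name for op in
            client.certificate_registration_provider.list_operations()]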
|
the-stack_0_15026 | '''
Copyright 2016 Tom Kenter
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations
under the License.
'''
import theano
import theano.tensor as T
import lasagne
from lasagne.regularization import regularize_layer_params, l2
import numpy as np
import sys
import os
import re
import _pickle as cPickle
#import cPickle
def makeOutputFileName(oArgs, iNrOfEmbeddings, iEmbeddingSize):
sShareWeights = "sharedWeights" if oArgs.bShareWeights else "noSharedWeights"
sReg = "reg" if oArgs.bRegularize else "noReg"
sLower = "noLc" if oArgs.bDontLowercase else "lc"
sPreInit = "preInit" if oArgs.sWord2VecFile else "noPreInit"
sGradientClippingBound = "noGradClip" \
if oArgs.fGradientClippingBound is None \
else ("gradClip_%f" % oArgs.fGradientClippingBound).replace(".", "_")
sOutputFile = "%s_%s_%s_lr_%s_%s_epochs_%d_batch_%d_neg_%d_voc_%dx%d_%s_%s_%s.pickle" % \
(oArgs.sLastLayer,
sShareWeights,
oArgs.sUpdate,
re.sub("_?0*$", '', ("%f" % oArgs.fLearningRate).replace(".", "_")),
sGradientClippingBound,
oArgs.iEpochs,
oArgs.iBatchSize,
oArgs.iNeg,
iNrOfEmbeddings - 1, # -1, because of the 0-embedding
iEmbeddingSize,
sReg,
sLower,
sPreInit)
return os.path.join(oArgs.OUTPUT_DIR, sOutputFile)
def storeWordEmbeddings(sOutputFile, npaWordEmbeddings, oVocab, oArgs):
if oArgs.bVerbose:
print("Storing word embeddings to %s" % sOutputFile)
fhOut = open(sOutputFile, mode='wb')
dSCBOW = {"oArgs": oArgs,
"npaWordEmbeddings": npaWordEmbeddings,
"oVocab": oVocab
}
cPickle.dump(dSCBOW, fhOut)
fhOut.close()
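def loadWordEmbeddings(sInputFile):
  # Hedged convenience counterpart to storeWordEmbeddings() (not in the
  # original file): reads back the pickled dict written above.
  fhIn = open(sInputFile, mode='rb')
  dSCBOW = cPickle.load(fhIn)
  fhIn.close()
  return dSCBOW["npaWordEmbeddings"], dSCBOW["oVocab"], dSCBOW["oArgs"]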
class softMaxLayer_matrix(lasagne.layers.MergeLayer):
'''
First layer gives a vector (or a batch of vectors, really)
Second layer gives a matrix (well, a batch of matrices)
We return a vector of numbers, just as many as there are cols in the second
layer matrix (NOTE that the second input layer is a transposed version of
the layer before it)
'''
def __init__(self, incomings, iEmbeddingSize, **kwargs):
    super(softMaxLayer_matrix, self).__init__(incomings, **kwargs)
self.iEmbeddingSize = iEmbeddingSize
def get_output_shape_for(self, input_shapes):
# input_shapes come like this:
# [(batch_size, vectors_size), (batch_size, rows, cols)]
return (input_shapes[0][0], input_shapes[1][1])
def get_output_for(self, inputs, **kwargs):
exps = T.exp((inputs[0].reshape((-1, self.iEmbeddingSize, 1)) * \
inputs[1]).sum(axis=1))
return exps / exps.sum(axis=1).dimshuffle((0, 'x'))
class softMaxLayer(lasagne.layers.Layer):
def __init__(self, incoming, **kwargs):
super(softMaxLayer, self).__init__(incoming, **kwargs)
def get_output_shape_for(self, input_shape):
'''
The input is just a vector of numbers.
The output is also a vector, same size as the input.
'''
return input_shape
def get_output_for(self, input, **kwargs):
'''
Take the exp() of all inputs, and divide by the total.
'''
exps = T.exp(input)
return exps / exps.sum(axis=1).dimshuffle((0, 'x'))
class sigmoidLayer(lasagne.layers.MergeLayer):
'''
First layer gives a vector (or a batch of vectors, really)
Second layer gives a matrix (well, a batch of matrices)
We return a vector of numbers, just as many as there are cols in the second
layer matrix (NOTE that the second input layer is a transposed version of
the layer before it)
'''
def __init__(self, incomings, iEmbeddingSize, **kwargs):
super(sigmoidLayer, self).__init__(incomings, **kwargs)
self.iEmbeddingSize = iEmbeddingSize
def get_output_shape_for(self, input_shapes):
# input_shapes come like this:
# [(batch_size, vectors_size), (batch_size, rows, cols)]
return (input_shapes[0][0], input_shapes[1][1])
def get_output_for(self, inputs, **kwargs):
'''
We want a dot product of every row in inputs[0] (a vector) with every
row in inputs[1] (a matrix).
    We do this 'by hand': we do an element-wise multiplication of every vector
in inputs[0] with every matrix in inputs[1], and sum the result.
'''
dots = (inputs[0].reshape((-1, self.iEmbeddingSize, 1)) * \
inputs[1]).sum(axis=1)
# Take the sigmoid
return 1.0 / (1.0 + T.exp(dots))
class cosineLayer(lasagne.layers.MergeLayer):
'''
First layer gives a vector (or a batch of vectors, really)
Second layer gives a matrix (well, a batch of matrices)
We return a vector of numbers, just as many as there are cols in the second
layer matrix (NOTE that the second input layer is a transposed version of
the layer before it)
'''
def __init__(self, incomings, iEmbeddingSize, **kwargs):
super(cosineLayer, self).__init__(incomings, **kwargs)
self.iEmbeddingSize = iEmbeddingSize
def get_output_shape_for(self, input_shapes):
# input_shapes come like this:
# [(batch_size, vectors_size), (batch_size, rows, cols)]
return (input_shapes[0][0], input_shapes[1][1])
def get_output_for(self, inputs, **kwargs):
'''
We want a dot product of every row in inputs[0] (a vector) with every
row in inputs[1] (a matrix).
    We do this 'by hand': we do an element-wise multiplication of every vector
in inputs[0] with every matrix in inputs[1], and sum the result.
'''
dots = (inputs[0].reshape((-1, self.iEmbeddingSize, 1)) * \
inputs[1]).sum(axis=1)
    # Make sure the broadcasting is right
norms_1 = T.sqrt(T.square(inputs[0]).sum(axis=1)).dimshuffle(0, 'x')
# NOTE that the embeddings are transposed in the previous layer
norms_2 = T.sqrt(T.square(inputs[1]).sum(axis=1))
norms = norms_1 * norms_2
return dots / norms
class averageLayer(lasagne.layers.Layer):
def __init__(self, incoming, fGradientClippingBound=None, **kwargs):
super(averageLayer, self).__init__(incoming, **kwargs)
self.fGradientClippingBound = fGradientClippingBound
def get_output_shape_for(self, input_shape):
'''
The input is a batch of word vectors.
The output is a single vector, same size as the input word embeddings
In other words, since we are averaging, we loose the penultimate dimension
'''
return (input_shape[0], input_shape[2])
def get_output_for(self, input, **kwargs):
'''
The input is a batch of word vectors.
    The output is the sum of the word embeddings divided by the number of
non-null word embeddings in the input.
What we do with the normalizers is, we go from
[[[.01, .02, .03], # Word embedding sentence 1, word 1
[.02, .3, .01], # Word embedding sentence 1, word 2
[.0, .0, .0]],
[[.05, .06, .063], # Word embedding sentence 2, word 1
[.034,.45, .05],
[.01, .001, .03]],
...
]
first to (so that is the inner non-zero sum(axis=2) part):
[[3, 3, 0], # Number of non-zero components per vector in sentence 1
     [3, 3, 3], # Number of non-zero components per vector in sentence 2
...
]
and finally to (so that is the outer non-zero sum(axis=1) part):
[2, 3, ...]
and we reshape that to:
[[2], # Number of words in sentence 1
[3], # Number of words in sentence 2
...]
'''
# Sums of word embeddings (so the zero embeddings don't matter here)
sums = input.sum(axis=1)
# Can we do this cheaper (as in, more efficient)?
# NOTE that we explicitly cast the output of the last sum() to floatX
# as otherwise Theano will cast the result of 'sums / normalizers' to
# float64
normalisers = T.neq((T.neq(input, 0.0)).sum(axis=2, dtype='int32'), 0.0).sum(axis=1, dtype='floatX').reshape((-1, 1))
averages = sums / normalisers
if self.fGradientClippingBound is not None:
averages = theano.gradient.grad_clip(averages,
- self.fGradientClippingBound,
self.fGradientClippingBound)
return averages
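def _demo_average_normalisers():
  # Hedged numpy sketch (not in the original file) of the normaliser logic
  # documented above: all-zero padding embeddings do not count towards the
  # per-sentence average.
  batch = np.array([[[.01, .02, .03],
                     [.02, .30, .01],
                     [.00, .00, .00]],   # padding word -> ignored
                    [[.05, .06, .063],
                     [.034, .45, .05],
                     [.01, .001, .03]]], dtype=np.float32)
  sums = batch.sum(axis=1)
  normalisers = (np.not_equal(batch, 0.0).sum(axis=2) != 0).sum(axis=1).reshape((-1, 1))
  return sums / normalisers  # normalisers end up as [[2], [3]]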
class averageLayer_matrix(lasagne.layers.Layer):
def __init__(self, incoming, iNrOfSentences=None,
fGradientClippingBound=None, **kwargs):
super(averageLayer_matrix, self).__init__(incoming, **kwargs)
self.iNrOfSentences = iNrOfSentences
self.fGradientClippingBound = fGradientClippingBound
def get_output_shape_for(self, input_shape):
'''
The input is a batch of matrices of word vectors.
The output is a batch of vectors, one for each matrix, the same size as
the input word embeddings
In other words, since we are averaging, we loose the penultimate dimension
'''
return (input_shape[0], input_shape[1], input_shape[3])
def get_output_for(self, input, **kwargs):
'''
The input is a batch of matrices of word vectors.
    The output is the sum of the word embeddings divided by the number of
non-zero word embeddings in the input.
    The idea with the normalisers is similar to the one in the normal averageLayer
'''
# Sums of word embeddings (so the zero embeddings don't matter here)
sums = input.sum(axis=2)
# Can we do this cheaper (as in, more efficient)?
# NOTE that we explicitly cast the output of the last sum() to floatX
# as otherwise Theano will cast the result of 'sums / normalizers' to
# float64
normalisers = T.neq((T.neq(input, 0.0)).sum(axis=3, dtype='int32'), 0.0).sum(axis=2, dtype='floatX').reshape((-1, self.iNrOfSentences, 1))
averages = sums / normalisers
if self.fGradientClippingBound is not None:
averages = theano.gradient.grad_clip(averages,
- self.fGradientClippingBound,
self.fGradientClippingBound)
return averages
class gateLayer(lasagne.layers.MergeLayer):
def __init__(self, incomings, **kwargs):
super(gateLayer, self).__init__(incomings, **kwargs)
def get_output_shape_for(self, input_shapes):
return input_shapes[1]
def get_output_for(self, inputs, **kwargs):
'''
First layer is a batch of embedding indices:
[[11,21,43,0,0],
[234,543,0,0,0,],
...
]
Second layer are the embeddings:
[ [[.02, .01...],
[.004, .005, ...],
...,
.0 .0 .0 ... ,
.0 .0 .0 ...],
[[...],
....
]
]
'''
return \
T.where(T.eq(inputs[0],0), np.float32(0.0), np.float32(1.0)).dimshuffle((0,1,'x')) * inputs[1]
class gateLayer_matrix(lasagne.layers.MergeLayer):
def __init__(self, incomings, **kwargs):
super(gateLayer_matrix, self).__init__(incomings, **kwargs)
def get_output_shape_for(self, input_shapes):
return input_shapes[1]
def get_output_for(self, inputs, **kwargs):
'''
First layer is a batch of matrices of embedding indices:
Second layer are the corresponding embeddings:
'''
return \
T.where(T.eq(inputs[0],0), np.float32(0.0), np.float32(1.0)).dimshuffle((0,1,2,'x')) * inputs[1]
class flipLayer(lasagne.layers.Layer):
'''
Flip the word embeddings of the negative examples.
  So the word embeddings <we> of the negative examples will become <-we>
'''
def __init__(self, incoming, iPos=None, iNrOfSentences=None, **kwargs):
super(flipLayer, self).__init__(incoming, **kwargs)
# Set all the values to -1
npaFlipper = np.ones(iNrOfSentences, dtype=np.int8) * -1
# Except for the first one/two (the positive examples)
npaFlipper[0:iPos] = 1
# Set the broadcasting right
self.flipper = theano.shared(npaFlipper).dimshuffle('x', 0, 'x', 'x')
def get_output_shape_for(self, input_shape):
return input_shape
def get_output_for(self, input, **kwargs):
return input * self.flipper
def preInit(tWeightShape, oW2v, oVocab):
assert tWeightShape == (oW2v.syn0.shape[0] + 1, oW2v.syn0.shape[1])
# Copy the embeddings
W = np.empty(tWeightShape, dtype=np.float32)
# NOTE that we start at 1 here (rather than 0)
W[1:tWeightShape[0],:] = oW2v.syn0
# Make a corresponding vocabulary
# We start at index 1 (0 is a dummy 0.0 embedding)
for i in range(oW2v.syn0.shape[0]):
sWord = oW2v.index2word[i]
iVocabIndex = i+1
oVocab.dVocab[sWord] = iVocabIndex
oVocab.dIndex2word[iVocabIndex] = sWord
return W
def nextBatch(oSentenceIterator, funcRandomIterator, oVocab=None,
npaBatch_1=None, npaBatch_2=None, iMaxNrOfTokens=None,
iBatchSize=None, iPos=None, iNeg=None):
'''
This function gives back a batch to train/test on.
It needs:
- a sentence iterator object that yields a triple of sentences:
(sentence n, sentence n-1, sentence n+1)
which are next to one another in the corpus.
These are considered positive examples.
- a sentence iterator that yields random sentences (so single sentences) from
the corpus. These are used as negative examples.
- the vocabulary object is usually empty
npaBatch_1 and npaBatch_2 should be pre-allocated arrays of size:
npaBatch_1: (iBatchSize, iMaxNrOfTokens)
npaBatch_2: (iBatchSize, iNeg + iPos, iMaxNrOfTokens)
'''
npaBatch_1[:] = 0.0 # Set the pre-allocated arrays to 0 again
npaBatch_2[:] = 0.0
iSentencePairsSampled = 0
# NOTE that because of how we do things, the last batch isn't included if
# it's smaller than the batch size
for tSentenceTuple in oSentenceIterator:
# NOTE in the toronto case, the sentence iterator already yields tokens
isTorontoFormat= oSentenceIterator.sName == "torontoSentenceIterator" or \
oSentenceIterator.sName =="week"
aWeIndices1 = \
[oVocab[sToken] for sToken in tSentenceTuple[0] \
if oVocab[sToken] is not None] \
if isTorontoFormat else \
[oVocab[sToken] for sToken in tSentenceTuple[0].split(' ') \
if oVocab[sToken] is not None]
aWeIndices2 = \
[oVocab[sToken] for sToken in tSentenceTuple[1] \
if oVocab[sToken] is not None] \
if isTorontoFormat else \
[oVocab[sToken] for sToken in tSentenceTuple[1].split(' ') \
if oVocab[sToken] is not None]
aWeIndices3 = None
if iPos == 2:
aWeIndices3 = \
[oVocab[sToken] for sToken in tSentenceTuple[2] \
if oVocab[sToken] is not None] \
if isTorontoFormat else \
[oVocab[sToken] for sToken in tSentenceTuple[2].split(' ') \
if oVocab[sToken] is not None]
# We only deal with triples all of which members contain at least one known
# word
if (len(aWeIndices1) == 0) or (len(aWeIndices2) == 0) or \
((iPos == 2) and (len(aWeIndices3) == 0)):
continue
npaBatch_1[iSentencePairsSampled][0:min(len(aWeIndices1),iMaxNrOfTokens)]=\
aWeIndices1[:iMaxNrOfTokens]
npaBatch_2[iSentencePairsSampled][0][0:min(len(aWeIndices2),iMaxNrOfTokens)] = aWeIndices2[:iMaxNrOfTokens]
if iPos == 2:
npaBatch_2[iSentencePairsSampled][1][0:min(len(aWeIndices3),iMaxNrOfTokens)] = aWeIndices3[:iMaxNrOfTokens]
iRandomSamples = 0
while 1: # We break from inside the loop
if iRandomSamples == iNeg: # So if iNeg == 0, we break right away
break
aWeIndicesRandom = []
while len(aWeIndicesRandom) == 0: # Get a non-empty random sentence
# NOTE that randomSentence is a list of tokens in the Toronto case
randomSentence = next(funcRandomIterator)
aWeIndicesRandom = \
[oVocab[sToken] for sToken in randomSentence \
if oVocab[sToken] is not None] \
if isTorontoFormat \
else [oVocab[sToken] for sToken in randomSentence.split(' ') \
if oVocab[sToken] is not None]
iRandomSamples += 1
npaBatch_2[iSentencePairsSampled][(iPos-1)+iRandomSamples][0:min(len(aWeIndicesRandom),iMaxNrOfTokens)] = aWeIndicesRandom[:iMaxNrOfTokens]
iSentencePairsSampled += 1
if iSentencePairsSampled == iBatchSize:
# Just yield something (npaBatch_1, npaBatch_2 are filled already)
yield 1
# Reset
iSentencePairsSampled = 0
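def _demo_consume_batches(oSentenceIterator, oRandomIterator, oVocab, train_fn,
                          npaTargets, iBatchSize=16, iMaxNrOfTokens=50,
                          iPos=2, iNeg=1):
  # Hedged sketch (not in the original file) of how nextBatch() is consumed:
  # the generator fills the two pre-allocated arrays in place and yields once
  # per full batch. train_fn here is assumed to be the cosine variant, which
  # also takes a target matrix.
  npaBatch_1 = np.zeros((iBatchSize, iMaxNrOfTokens), dtype=np.uint32)
  npaBatch_2 = np.zeros((iBatchSize, iNeg + iPos, iMaxNrOfTokens), dtype=np.uint32)
  for _ in nextBatch(oSentenceIterator, oRandomIterator, oVocab=oVocab,
                     npaBatch_1=npaBatch_1, npaBatch_2=npaBatch_2,
                     iMaxNrOfTokens=iMaxNrOfTokens, iBatchSize=iBatchSize,
                     iPos=iPos, iNeg=iNeg):
    train_fn(npaBatch_1, npaBatch_2, npaTargets)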
def build_scbow(oArgs, iPos=None, oW2v=None, oVocab=None, tWeightShape=None):
# Input variable for a batch of sentences (so: sentence n)
input_var_1 = T.matrix('input_var_1', dtype='uint32')
# Input variable for a batch of positive and negative examples
# (so sentence n-1, sentence n+1, neg1, neg2, ...)
input_var_2 = T.tensor3('input_var_2', dtype='uint32')
W_init_1, W_init_2 = None, None
# First embedding input layer
llIn_1 = lasagne.layers.InputLayer(shape=(None, oArgs.iMaxNrOfTokens),
input_var=input_var_1,
name='llIn_1')
# Second embedding input layer
llIn_2 = lasagne.layers.InputLayer(shape=(None, iPos + oArgs.iNeg,
oArgs.iMaxNrOfTokens),
input_var=input_var_2,
name='llIn_2')
W_init_1 = None
if oW2v is None:
W_init_1 = lasagne.init.Normal().sample(tWeightShape)
else: ## Here is the pre-initialization
W_init_1 = preInit(tWeightShape, oW2v, oVocab)
W_init_1[0,:] = 0.0
# First embedding layer
llEmbeddings_1 = lasagne.layers.EmbeddingLayer(
llIn_1,
input_size=tWeightShape[0],
output_size=tWeightShape[1],
W=W_init_1,
name='llEmbeddings_1')
llGate_1 = gateLayer([llIn_1, llEmbeddings_1], name='llGate_1')
llAverage_1 = averageLayer(llGate_1,
fGradientClippingBound=oArgs.fGradientClippingBound,
name='llAverage_1')
W_init_2 = None
if not oArgs.bShareWeights:
if oW2v is None:
W_init_2 = lasagne.init.Normal().sample(tWeightShape)
else: # We are not sharing, but we are pre-initializing
      W_init_2 = preInit(tWeightShape, oW2v, oVocab)
W_init_2[0,:] = 0.0
# Second embedding layer, the weights tied with the first embedding layer
llEmbeddings_2 = lasagne.layers.EmbeddingLayer(
llIn_2,
input_size=tWeightShape[0],
output_size=tWeightShape[1],
W=llEmbeddings_1.W if oArgs.bShareWeights else W_init_2,
name='llEmbeddings_2')
llGate_2 = gateLayer_matrix([llIn_2, llEmbeddings_2], name='llGate_2')
llAverage_2 = None
if oArgs.sLastLayer == 'cosine':
llAverage_2 = \
averageLayer_matrix(llGate_2, iNrOfSentences=iPos + oArgs.iNeg,
fGradientClippingBound=\
oArgs.fGradientClippingBound,
name="llAverage_2")
else:
llFlip_2 = flipLayer(llGate_2, iPos=iPos, iNrOfSentences=iPos + oArgs.iNeg,
name='llFlip_2')
llAverage_2 = \
averageLayer_matrix(llFlip_2, iNrOfSentences=iPos + oArgs.iNeg,
fGradientClippingBound=\
oArgs.fGradientClippingBound,
name="llAverage_2")
llTranspose_2 = lasagne.layers.DimshuffleLayer(llAverage_2, (0,2,1),
name='llTranspose_2')
llFinalLayer = None
if oArgs.sLastLayer == 'cosine':
llCosine = cosineLayer([llAverage_1, llTranspose_2], tWeightShape[1],
name='llCosine')
llFinalLayer = softMaxLayer(llCosine, name='llSoftMax')
else:
llFinalLayer = sigmoidLayer([llAverage_1, llTranspose_2], tWeightShape[1],
name='llSigmoid')
### That was all the network stuff
### Now let's build the functions
# Target var if needed
target_var = T.fmatrix('targets') if oArgs.sLastLayer == "cosine" else None
if oArgs.bVerbose:
print("Building prediction functions")
# Create a loss expression for training, i.e., a scalar objective we want
# to minimize (for our multi-class problem, it is the cross-entropy loss):
prediction = lasagne.layers.get_output(llFinalLayer)
if oArgs.bVerbose:
print("Building loss functions")
# For checking/debugging
forward_pass_fn = theano.function([input_var_1, input_var_2], prediction)
loss = None
if oArgs.sLastLayer == 'cosine':
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
else: # sigmoid
loss = - T.log(prediction).sum(axis=1)
if oArgs.bRegularize:
l2_penalty = regularize_layer_params(llFinalLayer, l2)
loss = loss + l2_penalty
loss = loss.mean()
if oArgs.bVerbose:
print("Building update functions")
params = lasagne.layers.get_all_params(llFinalLayer, trainable=True)
fStartLearningRate = np.float32(oArgs.fLearningRate)
thsLearningRate = None
updates = None
if oArgs.sUpdate == 'nesterov':
updates = lasagne.updates.nesterov_momentum(
loss, params, learning_rate=oArgs.fLearningRate,
momentum=oArgs.fMomentum)
elif oArgs.sUpdate == 'adamax':
updates = lasagne.updates.adamax(loss, params,
learning_rate=oArgs.fLearningRate)
elif oArgs.sUpdate == 'adadelta':
updates = lasagne.updates.adadelta(loss, params,
learning_rate=oArgs.fLearningRate)
elif oArgs.sUpdate == "sgd":
# This doesn't work with INEX for now...
thsLearningRate = theano.shared(fStartLearningRate)
updates = lasagne.updates.sgd(loss, params, learning_rate=thsLearningRate)
if oArgs.bVerbose:
print("Building training function")
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = None
if oArgs.sLastLayer == "cosine":
train_fn = theano.function([input_var_1, input_var_2, target_var],
loss,
updates=updates)
else:
train_fn = theano.function([input_var_1, input_var_2],
loss,
updates=updates)
return llFinalLayer, forward_pass_fn, thsLearningRate, train_fn
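def _demo_train_step(oArgs, oVocab, iNrOfEmbeddings, npaBatch_1, npaBatch_2,
                     oW2v=None):
  # Hedged sketch (not in the original file): build the network for iPos=2 and
  # run a single update. With the cosine/softmax last layer the targets put
  # probability mass 0.5 on each of the two positive sentences; iNrOfEmbeddings
  # is assumed to already include the all-zero padding row at index 0.
  iPos = 2
  tWeightShape = (iNrOfEmbeddings, oArgs.iEmbeddingSize)
  network, forward_pass_fn, thsLearningRate, train_fn = \
      build_scbow(oArgs, iPos=iPos, oW2v=oW2v, oVocab=oVocab,
                  tWeightShape=tWeightShape)
  npaTargets = np.zeros((oArgs.iBatchSize, oArgs.iNeg + iPos), dtype=np.float32)
  npaTargets[:, [0, 1]] = .5
  if oArgs.sLastLayer == "cosine":
    return train_fn(npaBatch_1, npaBatch_2, npaTargets)
  return train_fn(npaBatch_1, npaBatch_2)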
def updateLearningRate(thsLearningRate, iNrOfBatchesSoFar, fTotalNrOfBatches,
oArgs):
fNewLearningRate = \
max(oArgs.fLearningRate * 0.0001,
oArgs.fLearningRate * (1.0 - (iNrOfBatchesSoFar / fTotalNrOfBatches))
)
thsLearningRate.set_value(fNewLearningRate)
if oArgs.bVeryVerbose:
print("Batch %d of %.0f" % (iNrOfBatchesSoFar, fTotalNrOfBatches))
print("Learning rate: %f" % thsLearningRate.get_value())
def parseArguments():
import argparse
oArgsParser = argparse.ArgumentParser(description='Siamese CBOW')
oArgsParser.add_argument('DATA',
                           help="File (in PPDB case) or directory (in Toronto Book Corpus and INEX case) to read the data from. NOTE that the program runs in a particular input mode (INEX/PPDB/TORONTO) which is deduced from the directory/file name)")
oArgsParser.add_argument('OUTPUT_DIR',
help="A file to store the final and possibly intermediate word embeddings to (in cPickle format)")
oArgsParser.add_argument('-batch_size', metavar="INT", dest="iBatchSize",
help="Batch size. Default: 1",
type=int, action="store", default=1)
oArgsParser.add_argument('-dont_lowercase', dest='bDontLowercase',
help="By default, all input text is lowercased. Use this option to prevent this.",
action='store_true')
oArgsParser.add_argument('-dry_run', dest="bDryRun",
help="Build the network, print some statistics (if -v is on) and quit before training starts.",
action="store_true")
oArgsParser.add_argument('-embedding_size', metavar="INT",
dest="iEmbeddingSize",
help="Dimensionality of the word embeddings. Default: 300",
type=int, action="store", default=300)
oArgsParser.add_argument('-epochs', metavar="INT", dest="iEpochs",
help="Maximum number of epochs for training. Default: 10",
type=int, action="store", default=10)
oArgsParser.add_argument('-gradient_clipping_bound', metavar="FLOAT",
dest="fGradientClippingBound",
help="Gradient clipping bound (so gradients will be clipped to [-FLOAT, +FLOAT]).",
type=float, action="store")
oArgsParser.add_argument('-last_layer', metavar="LAYER",
dest="sLastLayer",
help="Last layer is 'cosine' or 'sigmoid'. NOTE that this choice also determines the loss function (binary cross entropy or negative sampling loss, respectively). Default: cosine",
action="store", default='cosine',
choices=['cosine', 'sigmoid'])
oArgsParser.add_argument('-learning_rate', metavar="FLOAT",
dest="fLearningRate",
help="Learning rate. Default: 1.0",
type=float, action="store", default=1.0)
oArgsParser.add_argument('-max_nr_of_tokens', metavar="INT",
dest="iMaxNrOfTokens",
help="Maximum number of tokens considered per sentence. Default: 50",
type=int, action="store", default=50)
oArgsParser.add_argument('-max_nr_of_vocab_words', metavar="INT",
dest="iMaxNrOfVocabWords",
help="Maximum number of words considered. If this is not specified, all words are considered",
type=int, action="store")
oArgsParser.add_argument('-momentum', metavar="FLOAT",
dest="fMomentum",
help="Momentum, only applies when 'nesterov' is used as update method (see -update). Default: 0.0",
type=float, action="store", default=0.0)
oArgsParser.add_argument('-neg', metavar="INT", dest="iNeg",
help="Number of negative examples. Default: 1",
type=int, action="store", default=1)
oArgsParser.add_argument('-regularize', dest="bRegularize",
help="Use l2 normalization on the parameters of the network",
action="store_true")
oArgsParser.add_argument('-share_weights', dest="bShareWeights",
help="Turn this option on (a good idea in general) for the embedding weights of the input sentences and the other sentences to be shared.",
action="store_true")
oArgsParser.add_argument('-start_storing_at', metavar="INT",
dest="iStartStoringAt",
help="Start storing embeddings at epoch number INT. Default: 0. I.e. start storing right away (if -store_at_epoch is on, that is)",
action="store", type=int, default=0)
oArgsParser.add_argument('-store_at_batch', metavar="INT",
dest="iStoreAtBatch",
help="Store embeddings every INT batches.",
action="store", type=int, default=None)
oArgsParser.add_argument('-store_at_epoch', dest="iStoreAtEpoch",
metavar="INT",
help="Store embeddings every INT epochs (so 1 for storing at the end of every epoch, 10 for for storing every 10 epochs, etc.).",
action="store", type=int)
oArgsParser.add_argument('-update', metavar="UPDATE_ALGORITHM",
dest="sUpdate",
help="Update algorithm. Options are 'adadelta', 'adamax', 'nesterov' (which uses momentum) and 'sgd'. Default: 'adadelta'",
action="store", default='adadelta',
choices=['adadelta', 'adamax', 'sgd',
'nesterov'])
oArgsParser.add_argument("-v", dest="bVerbose", action="store_true",
help="Be verbose")
oArgsParser.add_argument('-vocab', dest="sVocabFile", metavar="FILE",
                           help="A vocabulary file is simply a file, SORTED BY FREQUENCY, of frequency<SPACE>word lines. You can take the top n of these (which is why it should be sorted by frequency). See -max_nr_of_vocab_words.",
action="store")
oArgsParser.add_argument("-vv", dest="bVeryVerbose", action="store_true",
help="Be very verbose")
oArgsParser.add_argument('-w2v', dest="sWord2VecFile", metavar="FILE",
help="A word2vec model can be used to initialize the weights for words in the vocabulary file from (missing words just get a random embedding). If the weights are not initialized this way, they will be trained from scratch.",
action="store")
oArgsParser.add_argument('-wk',dest="week",action='store')
oArgs = oArgsParser.parse_args()
if (oArgs.sVocabFile is None) and (oArgs.sWord2VecFile is None):
    print("[ERROR] Please specify either a word2vec file or a vocab file", file=sys.stderr)
exit(1)
if oArgs.bVeryVerbose: # If we are very verbose, we are also just verbose
oArgs.bVerbose=True
return oArgs
if __name__ == "__main__":
oArgs = parseArguments()
iPos=2
# Prepare Theano variables for inputs and targets
# Input variable for a batch of left sentences
input_var_1 = T.matrix('input_var_1', dtype='int32')
# Input variable for a batch of right sentences, plus negative examples
input_var_2 = T.tensor3('input_var_2', dtype='int32')
target_var = T.fmatrix('targets') if oArgs.sLastLayer == "cosine" else None
npaWordEmbeddings = np.array([[.1, .2, .3, .4],
[.2, .3, .4, 5],
[-.7, -.4, -.5, -.6],
[-.8, -.9, -.45, -.56],
[.2131, .213, .434, .652]]
).astype(np.float32)
dModel = None
if oArgs.sStoredModel is not None:
import cPickle
fhFile = open(oArgs.sStoredModel, mode='rb')
dModel = cPickle.load(fhFile)
fhFile.close()
npaWordEmbeddings = dModel['npaWordEmbeddings']
npaTargets = np.zeros((oArgs.iBatchSize, oArgs.iNeg + iPos),
dtype=np.float32)
if iPos == 2:
npaTargets[:,[0,1]] = .5
else:
npaTargets[:,0] = 1.0
iNrOfEmbeddings, iEmbeddingSize = npaWordEmbeddings.shape
npaInput_1 = np.array([[0,1], [0,1], [1,0]]).astype('int32')
npaInput_2 = np.array([[2,1], [3,2], [1,0]]).astype('int32')
npaInput_3 = np.array([[2,3], [1,2], [1,4]]).astype('int32')
iMaxNrOfTokens = 2
network = build_scbow(input_var_1, input_var_2,
iBatchSize=oArgs.iBatchSize,
iPos=iPos,
iNeg=oArgs.iNeg, iMaxNrOfTokens=iMaxNrOfTokens,
tWeightShape=npaWordEmbeddings.shape,
npaWordEmbeddings=npaWordEmbeddings,
sLastLayer=oArgs.sLastLayer,
bVerbose=oArgs.bVerbose)
prediction = lasagne.layers.get_output(network)
forward_pass_fn = theano.function([input_var_1, input_var_2],
prediction)
# We want to maximize the sum of the log probabilities, so we want to
# minimize this loss objective
# NOTE that we expect the word embeddings of the negative examples to be
# reversed (as in: -1 * word embedding)
npaLossBoost = np.ones(oArgs.iNeg + iPos, dtype=np.float32)
#npaLossBoost[0:iPos] = oArgs.fLossBoost
loss = None
if oArgs.sLastLayer == 'cosine':
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
else: # sigmoid
loss = - (T.log(prediction) * npaLossBoost).sum(axis=1)
loss_fn = theano.function([prediction, target_var], loss)
# Pre-allocate memory
npaBatch_1 = np.zeros((oArgs.iBatchSize, iMaxNrOfTokens),
dtype=np.int8)
npaBatch_2 = np.zeros((oArgs.iBatchSize, oArgs.iNeg + iPos, iMaxNrOfTokens),
dtype=np.int8)
# Check that the network is producing anything
for i in moetNog(npaInput_1, npaInput_2, npaInput_3,
npaBatch_1, npaBatch_2,
iNeg=oArgs.iNeg, iBatchSize=oArgs.iBatchSize,
bShuffle=False):
# Check the batch itself
print("Batch (1):\n%s\n (2)\n%s" % (npaBatch_1, npaBatch_2))
npaPredictions = forward_pass_fn(npaBatch_1, npaBatch_2)
print("Predictions (%s):\n%s" % (npaPredictions[0].dtype, npaPredictions))
L = loss_fn(npaPredictions, npaTargets)
print("Loss: %s" % L)
|
the-stack_0_15028 | # -*- coding: utf-8 -*-
"""
<application name>
Copyright ©<year> <author>
Licensed under the terms of the <LICENSE>.
See LICENSE for details.
@author: <author>
"""
# Setup PyQt's v2 APIs
import sip
API_NAMES = ["QDate", "QDateTime", "QString", "QTextStream", "QTime", "QUrl",
"QVariant"]
API_VERSION = 2
for name in API_NAMES:
sip.setapi(name, API_VERSION) |
the-stack_0_15032 | import datetime
import pytz
import calendar
import os
cdt = datetime.datetime.now().date()
str_cdt=cdt.strftime("%d/%B")
print(str_cdt)
#store the date as a string
date_to_str=str(cdt)
#replace '-' with spaces in the string
clear_str=date_to_str.replace('-',' ')
#split into separate words
slice_str=clear_str.split()
#create a new date, +2 days ahead
creat_date=datetime.datetime(int(slice_str[0]),int(slice_str[1]),int(slice_str[2])+2)
print("created date 17.00 : ",creat_date)
# via timedelta
td=datetime.timedelta(minutes=3)
print("subtract 3 minutes (td) from the created date : ",creat_date-td)
#parse a datetime from a string
strptime=datetime.datetime.strptime('1803191327','%d%m%y%H%M')
print("datetime parsed from string",strptime)
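# a hedged extra example: format the parsed datetime back into a string
print("formatted back with strftime :", strptime.strftime("%d %B %Y, %H:%M"))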
# import sys
# print("sys.path : \n",sys.path)
# print("\n fp_date.__file__ : \n",pytz.__file__)
# print(fp_date.count_lines())
# print(fp_date.last_word_on_line(6666))
# print(fp_date.def_counter(fp_date.get_ip(1)))
print('_'*30)
format_date_on_log='17/May/2015:10:05:00'
#create a date from literal values
cdt=datetime.datetime(2020,5,5)
print('date created from numbers ',cdt)
format_calculate_date='0:00:16.603720'
print("format_date_on_log=",format_date_on_log)
time_obj=datetime.datetime.strptime(format_date_on_log,'%d/%B/%Y:%H:%M:%S')
print("time_obj",time_obj)
tz_minsk = pytz.timezone("Europe/Minsk")
tz_utc = pytz.timezone('UTC')
ct_minsk=datetime.datetime.now()
d_minsk=tz_minsk.localize(ct_minsk)
utc_minsk=d_minsk.astimezone(tz_utc)
print('_'*30)
print(utc_minsk)
print(d_minsk)
#current time in UTC
# ct_utc=datetime.datetime.utcnow()
#current time in UTC+3
# ct_minsk=datetime.datetime.now()
#
# d_minsk=tz_minsk.localize(ct_minsk)
# d_utc=tz_utc.localize(ct_utc)
# d_minsk_utc=d_utc.astimezone(tz_utc)
# print("d_minsk in +3 Europe/Minsk format :",d_minsk)
# print("d_minsk in UTC format :",d_utc)
# print("d_minsk in d_minsk_utc format :",d_minsk_utc)
os.system('cls' if os.name=='nt' else 'clear') |
the-stack_0_15033 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import, division, unicode_literals
import json
import math
import time
from datetime import date, datetime, timedelta
from decimal import Decimal
from json.encoder import encode_basestring
from math import floor
from mo_dots import Data, FlatList, Null, NullType, SLOT, is_data, is_list
from mo_future import PYPY, binary_type, is_binary, is_text, long, sort_using_key, text, utf8_json_encoder, xrange
from mo_json import ESCAPE_DCT, float2json, scrub
from mo_logs import Except
from mo_logs.strings import quote
from mo_times import Timer
from mo_times.dates import Date
from mo_times.durations import Duration
json_decoder = json.JSONDecoder().decode
_get = object.__getattribute__
_ = Except
# THIS FILE EXISTS TO SERVE AS A FAST REPLACEMENT FOR JSON ENCODING
# THE DEFAULT JSON ENCODERS CAN NOT HANDLE A DIVERSITY OF TYPES *AND* BE FAST
#
# 1) WHEN USING cPython, WE HAVE NO COMPILER OPTIMIZATIONS: THE BEST STRATEGY IS TO
# CONVERT THE MEMORY STRUCTURE TO STANDARD TYPES AND SEND TO THE INSANELY FAST
# DEFAULT JSON ENCODER
# 2) WHEN USING PYPY, WE USE CLEAR-AND-SIMPLE PROGRAMMING SO THE OPTIMIZER CAN DO
# ITS JOB. ALONG WITH THE UnicodeBuilder WE GET NEAR C SPEEDS
COMMA = u","
QUOTE = u'"'
COLON = u":"
QUOTE_COLON = QUOTE + COLON
COMMA_QUOTE = COMMA + QUOTE
PRETTY_COMMA = u", "
PRETTY_COLON = u": "
if PYPY:
# UnicodeBuilder IS ABOUT 2x FASTER THAN list()
from __pypy__.builders import UnicodeBuilder
else:
class UnicodeBuilder(list):
def __init__(self, length=None):
list.__init__(self)
def build(self):
return u"".join(self)
append = UnicodeBuilder.append
_dealing_with_problem = False
def pypy_json_encode(value, pretty=False):
"""
pypy DOES NOT OPTIMIZE GENERATOR CODE WELL
"""
global _dealing_with_problem
if pretty:
return pretty_json(value)
try:
_buffer = UnicodeBuilder(2048)
_value2json(value, _buffer)
output = _buffer.build()
return output
except Exception as e:
# THE PRETTY JSON WILL PROVIDE MORE DETAIL ABOUT THE SERIALIZATION CONCERNS
from mo_logs import Log
if _dealing_with_problem:
Log.error("Serialization of JSON problems", e)
else:
Log.warning("Serialization of JSON problems", e)
_dealing_with_problem = True
try:
return pretty_json(value)
except Exception as f:
Log.error("problem serializing object", f)
finally:
_dealing_with_problem = False
class cPythonJSONEncoder(object):
def __init__(self, sort_keys=True):
object.__init__(self)
self.encoder = utf8_json_encoder
def encode(self, value, pretty=False):
if pretty:
return pretty_json(value)
try:
with Timer("scrub", too_long=0.1):
scrubbed = scrub(value)
param = {"size": 0}
with Timer("encode {{size}} characters", param=param, too_long=0.1):
output = text(self.encoder(scrubbed))
param["size"] = len(output)
return output
except Exception as e:
from mo_logs.exceptions import Except
from mo_logs import Log
e = Except.wrap(e)
Log.warning("problem serializing {{type}}", type=text(repr(value)), cause=e)
raise e
def ujson_encode(value, pretty=False):
if pretty:
return pretty_json(value)
    try:
        # NOTE: requires the optional ujson package, which is not imported at
        # module level in this file
        from ujson import dumps as ujson_dumps
        scrubbed = scrub(value)
        return ujson_dumps(scrubbed, ensure_ascii=False, sort_keys=True, escape_forward_slashes=False).decode('utf8')
except Exception as e:
from mo_logs.exceptions import Except
from mo_logs import Log
e = Except.wrap(e)
Log.warning("problem serializing {{type}}", type=text(repr(value)), cause=e)
raise e
def _value2json(value, _buffer):
try:
_class = value.__class__
if value is None:
append(_buffer, u"null")
return
elif value is True:
append(_buffer, u"true")
return
elif value is False:
append(_buffer, u"false")
return
type = value.__class__
if type is binary_type:
append(_buffer, QUOTE)
try:
v = value.decode('utf8')
except Exception as e:
problem_serializing(value, e)
for c in v:
append(_buffer, ESCAPE_DCT.get(c, c))
append(_buffer, QUOTE)
elif type is text:
append(_buffer, QUOTE)
for c in value:
append(_buffer, ESCAPE_DCT.get(c, c))
append(_buffer, QUOTE)
elif type is dict:
if not value:
append(_buffer, u"{}")
else:
_dict2json(value, _buffer)
return
elif type is Data:
d = _get(value, SLOT) # MIGHT BE A VALUE NOT A DICT
_value2json(d, _buffer)
return
elif type in (int, long, Decimal):
append(_buffer, text(value))
elif type is float:
if math.isnan(value) or math.isinf(value):
append(_buffer, u'null')
else:
append(_buffer, float2json(value))
elif type in (set, list, tuple, FlatList):
_list2json(value, _buffer)
elif type is date:
append(_buffer, float2json(time.mktime(value.timetuple())))
elif type is datetime:
append(_buffer, float2json(time.mktime(value.timetuple())))
elif type is Date:
append(_buffer, float2json(value.unix))
elif type is timedelta:
append(_buffer, float2json(value.total_seconds()))
elif type is Duration:
append(_buffer, float2json(value.seconds))
elif type is NullType:
append(_buffer, u"null")
elif is_data(value):
if not value:
append(_buffer, u"{}")
else:
_dict2json(value, _buffer)
return
elif hasattr(value, '__data__'):
d = value.__data__()
_value2json(d, _buffer)
elif hasattr(value, '__json__'):
j = value.__json__()
append(_buffer, j)
elif hasattr(value, '__iter__'):
_iter2json(value, _buffer)
else:
from mo_logs import Log
Log.error(text(repr(value)) + " is not JSON serializable")
except Exception as e:
from mo_logs import Log
Log.error(text(repr(value)) + " is not JSON serializable", cause=e)
def _list2json(value, _buffer):
if not value:
append(_buffer, u"[]")
else:
sep = u"["
for v in value:
append(_buffer, sep)
sep = COMMA
_value2json(v, _buffer)
append(_buffer, u"]")
def _iter2json(value, _buffer):
append(_buffer, u"[")
sep = u""
for v in value:
append(_buffer, sep)
sep = COMMA
_value2json(v, _buffer)
append(_buffer, u"]")
def _dict2json(value, _buffer):
try:
prefix = u"{\""
for k, v in value.items():
append(_buffer, prefix)
prefix = COMMA_QUOTE
if is_binary(k):
k = k.decode('utf8')
for c in k:
append(_buffer, ESCAPE_DCT.get(c, c))
append(_buffer, QUOTE_COLON)
_value2json(v, _buffer)
append(_buffer, u"}")
except Exception as e:
from mo_logs import Log
Log.error(text(repr(value)) + " is not JSON serializable", cause=e)
ARRAY_ROW_LENGTH = 80
ARRAY_ITEM_MAX_LENGTH = 30
ARRAY_MAX_COLUMNS = 10
INDENT = " "
def pretty_json(value):
try:
if value is False:
return "false"
elif value is True:
return "true"
elif is_data(value):
try:
items = sort_using_key(value.items(), lambda r: r[0])
values = [encode_basestring(k) + PRETTY_COLON + pretty_json(v) for k, v in items if v != None]
if not values:
return "{}"
elif len(values) == 1:
return "{" + values[0] + "}"
else:
return "{\n" + ",\n".join(indent(v) for v in values) + "\n}"
except Exception as e:
from mo_logs import Log
from mo_math import OR
if OR(not is_text(k) for k in value.keys()):
Log.error(
"JSON must have string keys: {{keys}}:",
keys=[k for k in value.keys()],
cause=e
)
Log.error(
"problem making dict pretty: keys={{keys}}:",
keys=[k for k in value.keys()],
cause=e
)
elif value in (None, Null):
return "null"
elif value.__class__ in (binary_type, text):
if is_binary(value):
value = value.decode('utf8')
try:
if "\n" in value and value.strip():
return pretty_json({"$concat": value.split("\n"), "separator": "\n"})
else:
return quote(value)
except Exception as e:
from mo_logs import Log
try:
Log.note("try explicit convert of string with length {{length}}", length=len(value))
acc = [QUOTE]
for c in value:
try:
try:
c2 = ESCAPE_DCT[c]
except Exception:
c2 = c
c3 = text(c2)
acc.append(c3)
except BaseException:
pass
# Log.warning("odd character {{ord}} found in string. Ignored.", ord= ord(c)}, cause=g)
acc.append(QUOTE)
output = u"".join(acc)
Log.note("return value of length {{length}}", length=len(output))
return output
except BaseException as f:
Log.warning("can not convert {{type}} to json", type=f.__class__.__name__, cause=f)
return "null"
elif is_list(value):
if not value:
return "[]"
if ARRAY_MAX_COLUMNS == 1:
return "[\n" + ",\n".join([indent(pretty_json(v)) for v in value]) + "\n]"
if len(value) == 1:
j = pretty_json(value[0])
if j.find("\n") >= 0:
return "[\n" + indent(j) + "\n]"
else:
return "[" + j + "]"
js = [pretty_json(v) for v in value]
max_len = max(*[len(j) for j in js])
if max_len <= ARRAY_ITEM_MAX_LENGTH and max(*[j.find("\n") for j in js]) == -1:
# ALL TINY VALUES
num_columns = max(1, min(ARRAY_MAX_COLUMNS, int(floor((ARRAY_ROW_LENGTH + 2.0) / float(max_len + 2))))) # +2 TO COMPENSATE FOR COMMAS
if len(js) <= num_columns: # DO NOT ADD \n IF ONLY ONE ROW
return "[" + PRETTY_COMMA.join(js) + "]"
if num_columns == 1: # DO NOT rjust IF THERE IS ONLY ONE COLUMN
return "[\n" + ",\n".join([indent(pretty_json(v)) for v in value]) + "\n]"
content = ",\n".join(
PRETTY_COMMA.join(j.rjust(max_len) for j in js[r:r + num_columns])
for r in xrange(0, len(js), num_columns)
)
return "[\n" + indent(content) + "\n]"
pretty_list = js
output = ["[\n"]
for i, p in enumerate(pretty_list):
try:
if i > 0:
output.append(",\n")
output.append(indent(p))
except Exception:
from mo_logs import Log
Log.warning("problem concatenating string of length {{len1}} and {{len2}}",
len1=len("".join(output)),
len2=len(p)
)
output.append("\n]")
try:
return "".join(output)
except Exception as e:
from mo_logs import Log
Log.error("not expected", cause=e)
elif hasattr(value, '__data__'):
d = value.__data__()
return pretty_json(d)
elif hasattr(value, '__json__'):
j = value.__json__()
if j == None:
return " null " # TODO: FIND OUT WHAT CAUSES THIS
return pretty_json(json_decoder(j))
elif scrub(value) is None:
return "null"
elif hasattr(value, '__iter__'):
return pretty_json(list(value))
elif hasattr(value, '__call__'):
return "null"
else:
try:
if int(value) == value:
return text(int(value))
except Exception:
pass
try:
if float(value) == value:
return text(float(value))
except Exception:
pass
return pypy_json_encode(value)
except Exception as e:
problem_serializing(value, e)
def problem_serializing(value, e=None):
"""
THROW ERROR ABOUT SERIALIZING
"""
from mo_logs import Log
try:
typename = type(value).__name__
except Exception:
typename = "<error getting name>"
try:
rep = text(repr(value))
except Exception as _:
rep = None
if rep == None:
Log.error(
"Problem turning value of type {{type}} to json",
type=typename,
cause=e
)
else:
Log.error(
"Problem turning value ({{value}}) of type {{type}} to json",
value=rep,
type=typename,
cause=e
)
def indent(value, prefix=INDENT):
try:
content = value.rstrip()
suffix = value[len(content):]
lines = content.splitlines()
return prefix + (u"\n" + prefix).join(lines) + suffix
except Exception as e:
raise Exception(u"Problem with indent of value (" + e.message + u")\n" + value)
def value_compare(a, b):
if a == None:
if b == None:
return 0
return -1
elif b == None:
return 1
if a > b:
return 1
elif a < b:
return -1
else:
return 0
def datetime2milli(d, type):
try:
if type == datetime:
diff = d - datetime(1970, 1, 1)
else:
diff = d - date(1970, 1, 1)
return long(diff.total_seconds()) * long(1000) + long(diff.microseconds / 1000)
except Exception as e:
problem_serializing(d, e)
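# Worked example (illustrative sketch): with naive datetimes, datetime2milli returns
# milliseconds since 1970-01-01, e.g.
#
#     datetime2milli(datetime(2020, 1, 1), datetime)  # -> 1577836800000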
def unicode_key(key):
"""
CONVERT PROPERTY VALUE TO QUOTED NAME OF SAME
"""
if not isinstance(key, (text, binary_type)):
from mo_logs import Log
Log.error("{{key|quote}} is not a valid key", key=key)
return quote(text(key))
# OH HUM, cPython with uJSON, OR pypy WITH BUILTIN JSON?
# http://liangnuren.wordpress.com/2012/08/13/python-json-performance/
# http://morepypy.blogspot.ca/2011/10/speeding-up-json-encoding-in-pypy.html
if PYPY:
json_encoder = pypy_json_encode
else:
# from ujson import dumps as ujson_dumps
# json_encoder = ujson_encode
json_encoder = cPythonJSONEncoder().encode
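# Usage sketch (illustrative; the value below is hypothetical). json_encoder is bound
# above to whichever backend fits the interpreter, so callers can stay backend-agnostic:
#
#     serialized = json_encoder({"status": "ok", "count": 3})
#     pretty = pretty_json({"status": "ok", "count": 3})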
|
the-stack_0_15035 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from copy import deepcopy
from nnunet.network_architecture.generic_UNet import Generic_UNet
import SimpleITK as sitk
import shutil
from batchgenerators.utilities.file_and_folder_operations import join
def split_4d_nifti(filename, output_folder):
img_itk = sitk.ReadImage(filename)
dim = img_itk.GetDimension()
file_base = filename.split("/")[-1]
if dim == 3:
shutil.copy(filename, join(output_folder, file_base[:-7] + "_0000.nii.gz"))
return
elif dim != 4:
raise RuntimeError("Unexpected dimensionality: %d of file %s, cannot split" % (dim, filename))
else:
img_npy = sitk.GetArrayFromImage(img_itk)
spacing = img_itk.GetSpacing()
origin = img_itk.GetOrigin()
direction = np.array(img_itk.GetDirection()).reshape(4,4)
# now modify these to remove the fourth dimension
spacing = tuple(list(spacing[:-1]))
origin = tuple(list(origin[:-1]))
direction = tuple(direction[:-1, :-1].reshape(-1))
for i, t in enumerate(range(img_npy.shape[0])):
img = img_npy[t]
img_itk_new = sitk.GetImageFromArray(img)
img_itk_new.SetSpacing(spacing)
img_itk_new.SetOrigin(origin)
img_itk_new.SetDirection(direction)
sitk.WriteImage(img_itk_new, join(output_folder, file_base[:-7] + "_%04.0d.nii.gz" % i))
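# Usage sketch (paths below are hypothetical, not from the original repository):
#
#     split_4d_nifti("/data/Task01_BrainTumour/imagesTr/BRATS_001.nii.gz",
#                    "/data/Task01_BrainTumour/imagesTr_split")
#
# A 3D input is copied unchanged as <case>_0000.nii.gz; a 4D input is written as one
# 3D volume per modality: <case>_0000.nii.gz, <case>_0001.nii.gz, ...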
def get_pool_and_conv_props_poolLateV2(patch_size, min_feature_map_size, max_numpool, spacing):
"""
:param spacing:
:param patch_size:
:param min_feature_map_size: min edge length of feature maps in bottleneck
:return:
"""
initial_spacing = deepcopy(spacing)
reach = max(initial_spacing)
dim = len(patch_size)
num_pool_per_axis = get_network_numpool(patch_size, max_numpool, min_feature_map_size)
net_num_pool_op_kernel_sizes = []
net_conv_kernel_sizes = []
net_numpool = max(num_pool_per_axis)
current_spacing = spacing
for p in range(net_numpool):
reached = [current_spacing[i] / reach > 0.5 for i in range(dim)]
pool = [2 if num_pool_per_axis[i] + p >= net_numpool else 1 for i in range(dim)]
if all(reached):
conv = [3] * dim
else:
conv = [3 if not reached[i] else 1 for i in range(dim)]
net_num_pool_op_kernel_sizes.append(pool)
net_conv_kernel_sizes.append(conv)
current_spacing = [i * j for i, j in zip(current_spacing, pool)]
net_conv_kernel_sizes.append([3] * dim)
must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
patch_size = pad_shape(patch_size, must_be_divisible_by)
# we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here
return num_pool_per_axis, net_num_pool_op_kernel_sizes, net_conv_kernel_sizes, patch_size, must_be_divisible_by
def get_pool_and_conv_props(spacing, patch_size, min_feature_map_size, max_numpool):
"""
:param spacing:
:param patch_size:
:param min_feature_map_size: min edge length of feature maps in bottleneck
:return:
"""
dim = len(spacing)
current_spacing = deepcopy(list(spacing))
current_size = deepcopy(list(patch_size))
pool_op_kernel_sizes = []
conv_kernel_sizes = []
num_pool_per_axis = [0] * dim
while True:
# This is a problem because sometimes we have spacing 20, 50, 50 and we want to still keep pooling.
# Here we would stop however. This is not what we want! Fixed in get_pool_and_conv_propsv2
min_spacing = min(current_spacing)
valid_axes_for_pool = [i for i in range(dim) if current_spacing[i] / min_spacing < 2]
axes = []
for a in range(dim):
my_spacing = current_spacing[a]
partners = [i for i in range(dim) if current_spacing[i] / my_spacing < 2 and my_spacing / current_spacing[i] < 2]
if len(partners) > len(axes):
axes = partners
conv_kernel_size = [3 if i in axes else 1 for i in range(dim)]
# exclude axes that we cannot pool further because of min_feature_map_size constraint
#before = len(valid_axes_for_pool)
valid_axes_for_pool = [i for i in valid_axes_for_pool if current_size[i] >= 2*min_feature_map_size]
#after = len(valid_axes_for_pool)
#if after == 1 and before > 1:
# break
valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool]
if len(valid_axes_for_pool) == 0:
break
#print(current_spacing, current_size)
other_axes = [i for i in range(dim) if i not in valid_axes_for_pool]
pool_kernel_sizes = [0] * dim
for v in valid_axes_for_pool:
pool_kernel_sizes[v] = 2
num_pool_per_axis[v] += 1
current_spacing[v] *= 2
current_size[v] = np.ceil(current_size[v] / 2)
for nv in other_axes:
pool_kernel_sizes[nv] = 1
pool_op_kernel_sizes.append(pool_kernel_sizes)
conv_kernel_sizes.append(conv_kernel_size)
#print(conv_kernel_sizes)
must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
patch_size = pad_shape(patch_size, must_be_divisible_by)
# we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here
conv_kernel_sizes.append([3]*dim)
return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by
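# Usage sketch (the spacing and patch size below are illustrative, not defaults):
#
#     num_pool_per_axis, pool_kernels, conv_kernels, padded_patch, divisor = \
#         get_pool_and_conv_props(spacing=(3.0, 1.0, 1.0), patch_size=(40, 192, 192),
#                                 min_feature_map_size=4, max_numpool=999)
#
# Coarsely spaced axes (here the first) are pooled later and keep kernel size 1 on
# that axis until their spacing is within a factor of 2 of the finest axis.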
def get_pool_and_conv_props_v2(spacing, patch_size, min_feature_map_size, max_numpool):
"""
:param spacing:
:param patch_size:
:param min_feature_map_size: min edge length of feature maps in bottleneck
:return:
"""
dim = len(spacing)
current_spacing = deepcopy(list(spacing))
current_size = deepcopy(list(patch_size))
pool_op_kernel_sizes = []
conv_kernel_sizes = []
num_pool_per_axis = [0] * dim
kernel_size = [1] * dim
while True:
# exclude axes that we cannot pool further because of min_feature_map_size constraint
valid_axes_for_pool = [i for i in range(dim) if current_size[i] >= 2*min_feature_map_size]
if len(valid_axes_for_pool) < 1:
break
spacings_of_axes = [current_spacing[i] for i in valid_axes_for_pool]
# find axis that are within factor of 2 within smallest spacing
min_spacing_of_valid = min(spacings_of_axes)
valid_axes_for_pool = [i for i in valid_axes_for_pool if current_spacing[i] / min_spacing_of_valid < 2]
# max_numpool constraint
valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool]
if len(valid_axes_for_pool) == 1:
if current_size[valid_axes_for_pool[0]] >= 3 * min_feature_map_size:
pass
else:
break
if len(valid_axes_for_pool) < 1:
break
# now we need to find kernel sizes
# kernel sizes are initialized to 1. They are successively set to 3 when their associated axis becomes within
# factor 2 of min_spacing. Once they are 3 they remain 3
for d in range(dim):
if kernel_size[d] == 3:
continue
else:
if spacings_of_axes[d] / min(current_spacing) < 2:
kernel_size[d] = 3
other_axes = [i for i in range(dim) if i not in valid_axes_for_pool]
pool_kernel_sizes = [0] * dim
for v in valid_axes_for_pool:
pool_kernel_sizes[v] = 2
num_pool_per_axis[v] += 1
current_spacing[v] *= 2
current_size[v] = np.ceil(current_size[v] / 2)
for nv in other_axes:
pool_kernel_sizes[nv] = 1
pool_op_kernel_sizes.append(pool_kernel_sizes)
conv_kernel_sizes.append(deepcopy(kernel_size))
#print(conv_kernel_sizes)
must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
patch_size = pad_shape(patch_size, must_be_divisible_by)
# we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here
conv_kernel_sizes.append([3]*dim)
return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by
def get_shape_must_be_divisible_by(net_numpool_per_axis):
return 2 ** np.array(net_numpool_per_axis)
def pad_shape(shape, must_be_divisible_by):
"""
pads shape so that it is divisibly by must_be_divisible_by
:param shape:
:param must_be_divisible_by:
:return:
"""
if not isinstance(must_be_divisible_by, (tuple, list, np.ndarray)):
must_be_divisible_by = [must_be_divisible_by] * len(shape)
else:
assert len(must_be_divisible_by) == len(shape)
new_shp = [shape[i] + must_be_divisible_by[i] - shape[i] % must_be_divisible_by[i] for i in range(len(shape))]
for i in range(len(shape)):
if shape[i] % must_be_divisible_by[i] == 0:
new_shp[i] -= must_be_divisible_by[i]
new_shp = np.array(new_shp).astype(int)
return new_shp
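# Worked example (sketch): with num_pool_per_axis = [2, 6, 7] the divisor is
# 2 ** [2, 6, 7] = [4, 64, 128], and
#
#     pad_shape([24, 504, 512], [4, 64, 128])  # -> array([ 24, 512, 512])
#
# i.e. each axis is padded up to the next multiple of its divisor, while axes that
# are already divisible are left unchanged.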
def get_network_numpool(patch_size, maxpool_cap=999, min_feature_map_size=4):
network_numpool_per_axis = np.floor([np.log(i / min_feature_map_size) / np.log(2) for i in patch_size]).astype(int)
network_numpool_per_axis = [min(i, maxpool_cap) for i in network_numpool_per_axis]
return network_numpool_per_axis
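# Worked example (sketch): for patch_size = [24, 504, 512] and the default
# min_feature_map_size of 4, floor(log2(size / 4)) per axis gives [2, 6, 7]
# pooling operations, each capped element-wise by maxpool_cap.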
if __name__ == '__main__':
# trying to fix https://github.com/MIC-DKFZ/nnUNet/issues/261
median_shape = [24, 504, 512]
spacing = [5.9999094, 0.50781202, 0.50781202]
num_pool_per_axis, net_num_pool_op_kernel_sizes, net_conv_kernel_sizes, patch_size, must_be_divisible_by = get_pool_and_conv_props_poolLateV2(median_shape, min_feature_map_size=4, max_numpool=999, spacing=spacing)
|
the-stack_0_15036 | # coding: utf-8
"""
Schemas
The CRM uses schemas to define how custom objects should store and represent information in the HubSpot CRM. Schemas define details about an object's type, properties, and associations. The schema can be uniquely identified by its **object type ID**. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from hubspot.crm.schemas.api_client import ApiClient
from hubspot.crm.schemas.exceptions import ApiTypeError, ApiValueError # noqa: F401
class PublicObjectSchemasApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def purge(self, object_type, **kwargs): # noqa: E501
"""purge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.purge(object_type, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str object_type: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.purge_with_http_info(object_type, **kwargs) # noqa: E501
def purge_with_http_info(self, object_type, **kwargs): # noqa: E501
"""purge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.purge_with_http_info(object_type, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str object_type: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["object_type"]
all_params.extend(["async_req", "_return_http_data_only", "_preload_content", "_request_timeout"])
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method purge" % key)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'object_type' is set
        if self.api_client.client_side_validation and ("object_type" not in local_var_params or local_var_params["object_type"] is None):  # noqa: E501
raise ApiValueError("Missing the required parameter `object_type` when calling `purge`") # noqa: E501
collection_formats = {}
path_params = {}
if "object_type" in local_var_params:
path_params["objectType"] = local_var_params["object_type"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(["*/*"]) # noqa: E501
# Authentication setting
auth_settings = ["hapikey"] # noqa: E501
return self.api_client.call_api(
"/crm/v3/schemas/{objectType}/purge",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get("_return_http_data_only"), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
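# Usage sketch (illustrative; the object type name is hypothetical and authentication
# is configured on the ApiClient as usual for this SDK):
#
#     api = PublicObjectSchemasApi()            # builds a default ApiClient()
#     api.purge("p_my_custom_object")           # synchronous call, returns None
#     thread = api.purge("p_my_custom_object", async_req=True)
#     result = thread.get()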
|
the-stack_0_15038 | from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt, QBasicTimer
from PyQt5.QtGui import QPen, QColor, QBrush
import time
class Settings:
BLOCK_WIDTH = 10
BLOCK_HEIGHT = 10
NUM_BLOCKS_X = 50
NUM_BLOCKS_Y = 50
SCREEN_WIDTH = BLOCK_WIDTH * NUM_BLOCKS_X
SCREEN_HEIGHT = BLOCK_HEIGHT * NUM_BLOCKS_Y
class AppScene(QGraphicsScene):
SPEED = 50
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.lines = []
self.create_ant()
self.draw_grid()
self.set_opacity(0.7)
self.ant_x = 50
self.timer = QBasicTimer()
#print(self.itemAt(Settings.NUM_BLOCKS_X // 2, Settings.NUM_BLOCKS_Y // 2, QTransform()))
#time.sleep(1)
#self.move_ant(Settings.BLOCK_HEIGHT*9, 0)
self.start()
def start(self):
self.addItem(self.ant)
self.timer.start(self.SPEED, self)
def timerEvent(self, event):
if event.timerId() == self.timer.timerId():
self.set_ant(self.ant_x % 500 - 250, -40)
self.ant_x += 10
def move_ant(self, dx, dy):
self.ant.moveBy(dx, dy)
def set_ant(self, x, y):
self.ant.setPos(x, y)
def create_ant(self):
self.ant = QGraphicsEllipseItem(Settings.SCREEN_WIDTH // 2, Settings.SCREEN_HEIGHT // 2, Settings.BLOCK_WIDTH, Settings.BLOCK_HEIGHT)
self.ant.setBrush(QBrush(QColor(0, 0, 255), Qt.SolidPattern))
def draw_grid(self):
width = Settings.SCREEN_WIDTH
height = Settings.SCREEN_HEIGHT
self.setSceneRect(0, 0, width, height)
pen = QPen(QColor(100, 100, 100), 1, Qt.SolidLine)
for x in range(0, Settings.NUM_BLOCKS_X + 1):
_x = x * Settings.BLOCK_WIDTH
self.lines.append(self.addLine(_x, 0, _x, height, pen))
for y in range(0, Settings.NUM_BLOCKS_Y + 1):
_y = y * Settings.BLOCK_HEIGHT
self.lines.append(self.addLine(0, _y, width, _y, pen))
    def set_visible(self, visible=True):
for line in self.lines:
line.setVisible(visible)
def draw_rect(self, col, row, color):
col = col * Settings.BLOCK_HEIGHT
row = row * Settings.BLOCK_WIDTH
rect = QGraphicsRectItem(row, col, Settings.BLOCK_WIDTH, Settings.BLOCK_HEIGHT)
rect.setBrush(QBrush(color, Qt.SolidPattern))
self.addItem(rect)
def delete_grid(self):
for line in self.lines:
self.removeItem(line)
del self.lines[:]
def set_opacity(self, opacity):
for line in self.lines:
line.setOpacity(opacity)
class Appview(QGraphicsView):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setFixedSize(Settings.SCREEN_WIDTH + 10, Settings.SCREEN_HEIGHT + 10)
def drawBackground(self, painter, rect):
super().drawBackground(painter, rect)
if __name__ == "__main__":
import sys
sys._excepthook = sys.excepthook
def exception_hook(exctype, value, traceback):
print(exctype, value, traceback)
sys._excepthook(exctype, value, traceback)
sys.exit(1)
sys.excepthook = exception_hook
app = QApplication(sys.argv)
QScene = AppScene()
win = Appview()
win.setScene(QScene)
win.show()
sys.exit(app.exec_())
|
the-stack_0_15039 | import os
import shutil
from conans.client.source import complete_recipe_sources
from conans.errors import ConanException
from conans.model.ref import ConanFileReference, PackageReference
from conans.util.files import rmdir
def _prepare_sources(cache, ref, remote_manager, loader, remotes):
conan_file_path = cache.package_layout(ref).conanfile()
conanfile = loader.load_class(conan_file_path)
complete_recipe_sources(remote_manager, cache, conanfile, ref, remotes)
return conanfile.short_paths
def _get_package_ids(cache, ref, package_ids):
if not package_ids:
return []
if package_ids is True:
packages = cache.package_layout(ref).packages()
if os.path.exists(packages):
package_ids = os.listdir(packages)
else:
package_ids = []
return package_ids
def cmd_copy(ref, user_channel, package_ids, cache, user_io, remote_manager, loader, remotes,
force=False):
"""
param package_ids: Falsey=do not copy binaries. True=All existing. []=list of ids
"""
# It is important to get the revision early, so "complete_recipe_sources" can
# get the right revision sources, not latest
layout = cache.package_layout(ref)
src_metadata = layout.load_metadata()
ref = ref.copy_with_rev(src_metadata.recipe.revision)
short_paths = _prepare_sources(cache, ref, remote_manager, loader, remotes)
package_ids = _get_package_ids(cache, ref, package_ids)
package_copy(ref, user_channel, package_ids, cache, user_io, short_paths, force)
def package_copy(src_ref, user_channel, package_ids, cache, user_io, short_paths=False,
force=False):
dest_ref = ConanFileReference.loads("%s/%s@%s" % (src_ref.name,
src_ref.version,
user_channel))
# Generate metadata
src_layout = cache.package_layout(src_ref, short_paths)
src_metadata = src_layout.load_metadata()
dst_layout = cache.package_layout(dest_ref, short_paths)
# Copy export
export_origin = src_layout.export()
if not os.path.exists(export_origin):
raise ConanException("'%s' doesn't exist" % str(src_ref))
export_dest = dst_layout.export()
if os.path.exists(export_dest):
if not force and not user_io.request_boolean("'%s' already exist. Override?"
% str(dest_ref)):
return
rmdir(export_dest)
shutil.copytree(export_origin, export_dest, symlinks=True)
user_io.out.info("Copied %s to %s" % (str(src_ref), str(dest_ref)))
export_sources_origin = src_layout.export_sources()
export_sources_dest = dst_layout.export_sources()
if os.path.exists(export_sources_dest):
rmdir(export_sources_dest)
shutil.copytree(export_sources_origin, export_sources_dest, symlinks=True)
user_io.out.info("Copied sources %s to %s" % (str(src_ref), str(dest_ref)))
# Copy packages
package_revisions = {} # To be stored in the metadata
for package_id in package_ids:
pref_origin = PackageReference(src_ref, package_id)
pref_dest = PackageReference(dest_ref, package_id)
package_path_origin = src_layout.package(pref_origin)
package_path_dest = dst_layout.package(pref_dest)
if os.path.exists(package_path_dest):
if not force and not user_io.request_boolean("Package '%s' already exist."
" Override?" % str(package_id)):
continue
rmdir(package_path_dest)
package_revisions[package_id] = (src_metadata.packages[package_id].revision,
src_metadata.recipe.revision)
shutil.copytree(package_path_origin, package_path_dest, symlinks=True)
user_io.out.info("Copied %s to %s" % (str(package_id), str(dest_ref)))
# Generate the metadata
with dst_layout.update_metadata() as metadata:
metadata.recipe.revision = src_metadata.recipe.revision
for package_id, (revision, recipe_revision) in package_revisions.items():
metadata.packages[package_id].revision = revision
metadata.packages[package_id].recipe_revision = recipe_revision
|
the-stack_0_15040 | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name="color",
parent_name="layout.xaxis.rangeselector.font",
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
the-stack_0_15044 | import cv2
# Path to the Haar cascade files shipped with opencv-python; on most installs this
# directory is also available programmatically as cv2.data.haarcascades.
cv2_data_dir = '/usr/local/lib/python3.7/dist-packages/cv2/data/'
face_cascade = cv2.CascadeClassifier(cv2_data_dir + 'haarcascade_frontalface_default.xml')
# Load the test image in grayscale; Haar cascades operate on single-channel images.
img = cv2.imread('faces.jpg', cv2.IMREAD_GRAYSCALE)
scale_factor = 1.4    # how much the image is shrunk at each detection scale
min_neighbours = 5    # minimum neighbouring detections required to keep a candidate face
faces = face_cascade.detectMultiScale(img, scale_factor, min_neighbours)
print(faces)
# Draw a rectangle around every detected face (x, y, width, height).
for (x, y, w, h) in faces:
    img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255), 2)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
the-stack_0_15046 | import base64
import datetime
import plotly
import plotly.figure_factory as ff
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import json
import dashUI.run as run
import dashUI.run_OD as run_OD
iterationList = []
lossList = []
epochOfLossList = []
epochOfTop1ErrorList = []
epochOfMeanAPList = []
TrainSet_top1_error_valueList = []
ValidationSet_top1_error_valueList = []
TrainSet_mean_ap_valueList = []
ValidationSet_mean_ap_valueList = []
metricList = []
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
image_filename = 'C:/Users/930415/PycharmProjects/chadle/dashUI/icon.png' # replace with your own image
encoded_image = base64.b64encode(open(image_filename, 'rb').read())
CLProjectNames = ','.join(run.CLProjectList)
ODProjectNames = ','.join(run.ODProjectList)
app.layout = html.Div([
html.Center(html.Img(src='data:image/png;base64,{}'.format(encoded_image.decode()), width='80', height='70')),
# Title
html.H1('CHaDLE ',
style={
"font": 'verdana',
'textAlign': 'center',
'color': 'Black'
}
),
# Tabs for CL and OD
html.Div([
dcc.Tabs(id='AllTab', value='AllTab', children=[
# Classification Tab
dcc.Tab(label='Classification', value='CLTab', children=[
html.Div([html.Th(children='Available Classification Projects: ' + CLProjectNames, colSpan="1"),
html.Br(),
"Project Name:",
dcc.Input(
id='ProjectName_CL', value='Animals', type='text'
),
html.Br(),
"Training Device:", dcc.RadioItems(
id='Runtime_CL',
options=[{'label': i, 'value': i} for i in ['cpu', 'gpu']],
value='cpu',
labelStyle={'display': 'inline-block'}
),
"Pretrained Model:", dcc.Dropdown(
id='PretrainedModel_CL',
options=[{'label': i, 'value': i} for i in ["classifier_enhanced", "classifier_compact"]],
value='classifier_compact'
),
],
style={'width': '25%', 'display': 'inline-block'}
),
html.Br(),
html.Br(),
html.Div([
html.Div([
html.Label('Image Width'),
dcc.Input(id='ImWidth_CL', value='100 ', type='number', min=0, step=1, ),
html.Label('Image Height'),
dcc.Input(id='ImHeight_CL', value='100', type='number', min=0, step=1, ),
html.Label('Image Channel'),
dcc.Input(id='ImChannel_CL', value='3', type='number', min=0, step=1, ),
html.Label('Batch Size'),
dcc.Input(id='BatchSize_CL', value='1', type='number', min=0, step=1, ),
html.Label('Initial Learning Rate'),
dcc.Input(id='InitialLearningRate_CL', value='0.001', type='number', min=0, step=0.00001, ),
html.Label('Momentum'),
dcc.Input(id='Momentum_CL', value='0.09', type='number', min=0, step=0.00001, ),
html.Label('Number of Epochs'),
dcc.Input(id='NumEpochs_CL', value='2', type='number', min=0, step=1, ),
html.Label('Change Learning Rate @ Epochs'),
dcc.Input(id='ChangeLearningRateEpochs_CL', value='50,100', type='text'),
html.Label('Learning Rate Schedule'),
dcc.Input(id='lr_change_CL', value='0.01,0.05', type='text'),
html.Label('Regularisation Constant'),
dcc.Input(id='WeightPrior_CL', value='0.001', type='number', min=0, step=0.00001, ),
html.Label('Class Penalty'),
dcc.Input(id='class_penalty_CL', value='0,0', type='text'),
],
style={'width': '20%', 'display': 'inline-block'}),
html.Div([
html.Label('Augmentation Percentage'),
dcc.Input(id='AugmentationPercentage_CL', value='100', type='number', min=0, max=100, step=1, ),
html.Label('Rotation'),
dcc.Input(id='Rotation_CL', value='90', type='number', min=-180, max=180, step=90, ),
html.Label('Mirror (off,c,r,rc)'),
dcc.Input(id='mirror_CL', value='off', type='text', ),
html.Label('Brightness Variation'),
dcc.Input(id='BrightnessVariation_CL', value='1', type='number', min=-100, max=100, step=1, ),
html.Label('Brightness Variation Spot'),
dcc.Input(id='BrightnessVariationSpot_CL', value='1', type='number', min=-100, max=100,
step=1, ),
html.Label('Rotation Range (Step of 1)'),
dcc.Input(id='RotationRange_CL', value='1', type='number', min=1, step=1, ),
# html.Label('Ignore Direction'),
# dcc.Input(id='IgnoreDirection', value='false', type='text'),
# html.Label('Class IDs No Orientation Exist'),
# dcc.Input(id='ClassIDsNoOrientationExist', value='false', type='text'),
# html.Label('Class IDs No Orientation'),
# dcc.Input(id='ClassIDsNoOrientation', value='[]', type='text'),
],
style={'width': '20%', 'float': 'left', 'display': 'inline-block'}),
html.Div([html.H4('Evaluation'),
html.Div(id='evaluation_text_CL'),
dcc.Graph(id='evaluation_graph_CL'),
],
style={'width': '50%', 'float': 'right', }),
dcc.Interval(
id='interval-evaluation_CL',
interval=1 * 1000, # in milliseconds
n_intervals=0
)
]),
html.Br(),
html.Br(),
dcc.Loading(
id="loading-1",
type="default",
children=[html.Div(id="Training_loading_CL"),
html.Div(id="Evaluation_loading_CL")]
),
html.Div([
# html.Button(id='submit-button-state', n_clicks=0, children='Submit'),
html.Button(id='operation_button_CL', n_clicks=0, children='Start Training'),
# html.Button(id='train_button', n_clicks=0, children='Train'),
# html.Button(id='parameters_out_button', n_clicks=0, children='Output Parameters'),
html.Button(id='evaluation_button_CL', n_clicks=0, children='Evaluation'),
],
style={
'width': '70%', 'float': 'right',
}
),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Div(id='Operation_output_CL'),
html.Div(id='makeJson_CL'),
# html.Div(id='output-state'),
# html.Div(id='Train Result'),
# html.Div(id='Evaluation Result'),
# Graph Plotter
html.Div([
html.H1('CHaDLE Training Monitor - Classification',
style={
"font": 'Helvetica',
'textAlign': 'center',
'color': 'Black'
}),
html.Div(id='metrics_CL',
style={
"font": 'Helvetica',
'textAlign': 'center',
'color': 'Blue'
}),
dcc.Graph(id='iteration_loss_graph_CL'),
dcc.Graph(id='top1_error_graph_CL'),
dcc.Interval(
id='interval_graph_CL',
interval=1 * 1000, # in milliseconds
n_intervals=0
)
])
]),
# Object Detection Tab
dcc.Tab(label='Object Detection', value='ODTab', children=[
# Basic inputs
html.Div([html.Th(children='Available Object Detection Projects: ' + ODProjectNames, colSpan="1"),
html.Br(),
"Project Name:",
dcc.Input(
id='ProjectName_OD', value='NTBW', type='text'
),
html.Br(),
"Training Device:", dcc.RadioItems(
id='Runtime_OD',
options=[{'label': i, 'value': i} for i in ['cpu', 'gpu']],
value='cpu',
labelStyle={'display': 'inline-block'}
),
"Pretrained Model:", dcc.Dropdown(
id='PretrainedModel_OD',
options=[{'label': i, 'value': i} for i in ["classifier_enhanced", "classifier_compact"]],
value='classifier_compact'
),
], style={'width': '15%', 'display': 'inline-block'}),
html.Br(),
html.Br(),
# Parameters inputs
html.Div([
html.Div([
html.Label('Number of Classes'),
dcc.Input(id='NumClasses_OD', value='5', type='number', min=0, step=1, ),
html.Label('Image Width'),
dcc.Input(id='ImWidth_OD', value='960', type='number', min=0, step=1, ),
html.Label('Image Height'),
dcc.Input(id='ImHeight_OD', value='1024', type='number', min=0, step=1, ),
html.Label('Image Channel'),
dcc.Input(id='ImChannel_OD', value='3', type='number', min=0, step=1, ),
html.Label('Capacity'),
dcc.Input(id='Capacity_OD', value='medium', type='text', min=0, step=1, ),
html.Label('Instance Type'),
dcc.Input(id='InstanceType_OD', value='rectangle1', type='text', min=0, step=1, ),
html.Label('Training Percent'),
dcc.Input(id='TrainingPercent_OD', value='75', type='number', min=0, step=1, ),
html.Label('Validation Percent'),
dcc.Input(id='ValidationPercent_OD', value='15', type='number', min=0, step=1, ),
],
style={'width': '15%', 'display': 'inline-block'}),
html.Div([
html.Label('Batch Size'),
dcc.Input(id='BatchSize_OD', value='10', type='number', min=0, step=1, ),
html.Label('Initial Learning Rate'),
dcc.Input(id='InitialLearningRate_OD', value='0.001', type='number', min=0, step=0.00001, ),
html.Label('Momentum'),
dcc.Input(id='Momentum_OD', value='0.09', type='number', min=0, step=0.00001, ),
html.Label('Number of Epochs'),
dcc.Input(id='NumEpochs_OD', value='2', type='number', min=0, step=1, ),
html.Label('Change Learning Rate @ Epochs'),
dcc.Input(id='ChangeLearningRateEpochs_OD', value='50,100', type='text'),
html.Label('Learning Rate Schedule'),
dcc.Input(id='lr_change_OD', value='0.01,0.05', type='text'),
html.Label('Regularisation Constant'),
dcc.Input(id='WeightPrior_OD', value='0.001', type='number', min=0, step=0.00001, ),
html.Label('Class Penalty'),
dcc.Input(id='class_penalty_OD', value='0,0', type='text'),
],
style={'width': '15%', 'display': 'inline-block'}),
html.Div([
html.Label('Augmentation Percentage'),
dcc.Input(id='AugmentationPercentage_OD', value='100', type='number', min=0, max=100, step=1, ),
html.Label('Rotation'),
dcc.Input(id='Rotation_OD', value='90', type='number', min=-180, max=180, step=90, ),
html.Label('Mirror (off,c,r,rc)'),
dcc.Input(id='mirror_OD', value='off', type='text', ),
html.Label('Brightness Variation'),
dcc.Input(id='BrightnessVariation_OD', value='0', type='number', min=-100, max=100, step=1, ),
html.Label('Brightness Variation Spot'),
dcc.Input(id='BrightnessVariationSpot_OD', value='0', type='number', min=-100, max=100,
step=1, ),
html.Label('Rotation Range (Step of 1)'),
dcc.Input(id='RotationRange_OD', value='10', type='number', min=1, step=1, ),
# html.Label('Ignore Direction'),
# dcc.Input(id='IgnoreDirection', value='false', type='text'),
# html.Label('Class IDs No Orientation Exist'),
# dcc.Input(id='ClassIDsNoOrientationExist', value='false', type='text'),
# html.Label('Class IDs No Orientation'),
# dcc.Input(id='ClassIDsNoOrientation', value='[]', type='text'),
],
style={'width': '15%', 'float': 'initial', 'display': 'inline-block',
}),
# Estimated Value show and input
html.Div([
html.H4('Halcon estimated values'),
html.P('Key in new desired value or leave it empty: '),
html.Br(),
html.Div([html.P('Min Level: '),
html.Div([html.Div(id='MinLevel_OD'), ],
style={"font": 'Helvetica', 'color': 'Blue'}),
dcc.Input(id='MinLevel_Input_OD', placeholder='Integer', type='number', min=0,
step=1,
debounce=True), ]),
html.Br(),
html.Div([html.P('Max Level: '),
html.Div([html.Div(id='MaxLevel_OD'), ],
style={"font": 'Helvetica', 'color': 'Blue'}),
dcc.Input(id='MaxLevel_Input_OD', placeholder='Integer', type='number', min=0,
step=1,
debounce=True), ]),
html.Br(),
html.Div([html.P('Anchor Number of Subscales: '),
html.Div([html.Div(id='AnchorNumSubscales_OD'), ],
style={"font": 'Helvetica', 'color': 'Blue'}),
dcc.Input(id='AnchorNumSubscales_Input_OD', placeholder='Integer', type='number',
min=0,
step=1,
debounce=True), ]),
html.Br(),
html.Div([html.P('Anchor Aspect Ratios (min,max,mean,deviation): '),
html.Div([html.Div(id='AnchorAspectRatios_OD'), ],
style={"font": 'Helvetica', 'color': 'Blue'}),
dcc.Input(id='AnchorAspectRatios_Input_OD',
placeholder='List (0.720, 1.475, 2.125, 2.753)',
type='text', min=0, debounce=True, style={'width': '50%', }), ]),
# if user wanna change, type in the desired value.
# value = Best value among 4 read by halcon
# label the value,
],
style={'width': '40%', 'float': 'right'},
),
]),
html.Br(),
html.Br(),
html.Br(),
dcc.Loading(
id="loading_OD",
type="default",
children=[html.Div(id="Training_loading_OD"),
html.Div(id="Estimate_values_loading_OD")]
),
html.Br(),
# Buttons
html.Div([
html.Button(id='estimate_button_OD', n_clicks=0, children='Halcon Estimate Values'),
html.Button(id='operation_button_OD', n_clicks=0, children='Train'),
# html.Button(id='parameters_out_button', n_clicks=0, children='Output Parameters'),
html.Button(id='evaluation_button_OD', n_clicks=0, children='Evaluation'), ],
style={'display': 'flex',
'justify-content': 'center',
'align-items': 'center',
'height': '100px',
}, ),
html.Div([html.Label(id='training_output_OD'), ], style={'display': 'flex',
'justify-content': 'center',
'align-items': 'center',
'height': '50px',
}, ),
# Evaluation Graph
html.Div([
html.Div([html.H2('Evaluation Graph Coming Soon...',
style={
"font": 'Helvetica',
'textAlign': 'center',
'color': 'Black'
}),
html.Div(id='evaluation_text_OD'),
dcc.Graph(id='evaluation_graph_OD'),
],
style={'width': '100%', 'float': 'initial'}),
dcc.Interval(
id='interval-evaluation_OD',
interval=1 * 1000, # in milliseconds
n_intervals=0
)
], ),
# OD training monitor graph plotter
html.Div([
html.H1('CHaDLE Training Monitor - Object Detection',
style={
"font": 'Helvetica',
'textAlign': 'center',
'color': 'Black'
}),
html.Div(id='metrics_OD',
style={
"font": 'Helvetica',
'textAlign': 'center',
'color': 'Blue'
}),
dcc.Graph(id='iteration_loss_graph_OD'),
dcc.Graph(id='mean_ap_graph_OD'),
dcc.Interval(
id='interval_graph_OD',
interval=1 * 1000, # in milliseconds
n_intervals=0
)
])
]),
]),
]),
])
############################################################################################################
############################################## Call Backs ##################################################
############################################################################################################
############################################ Classification ##################################
@app.callback(Output('Operation_output_CL', 'children'),
Output("Training_loading_CL", "children"),
Input('operation_button_CL', 'n_clicks'),
State('ProjectName_CL', 'value'),
State('Runtime_CL', 'value'),
State('PretrainedModel_CL', 'value'),
State('ImWidth_CL', 'value'),
State('ImHeight_CL', 'value'),
State('ImChannel_CL', 'value'),
State('BatchSize_CL', 'value'),
State('InitialLearningRate_CL', 'value'),
State('Momentum_CL', 'value'),
State('NumEpochs_CL', 'value'),
State('ChangeLearningRateEpochs_CL', 'value'),
State('lr_change_CL', 'value'),
State('WeightPrior_CL', 'value'),
State('class_penalty_CL', 'value'),
State('AugmentationPercentage_CL', 'value'),
State('Rotation_CL', 'value'),
State('mirror_CL', 'value'),
State('BrightnessVariation_CL', 'value'),
State('BrightnessVariationSpot_CL', 'value'),
State('RotationRange_CL', 'value'),
)
def operation_CL(operation_button_CL, ProjectName_CL, Runtime_CL, PretrainedModel_CL, ImWidth_CL, ImHeight_CL,
ImChannel_CL,
BatchSize_CL, InitialLearningRate_CL, Momentum_CL, NumEpochs_CL, ChangeLearningRateEpochs_CL,
lr_change_CL, WeightPrior_CL,
class_penalty_CL, AugmentationPercentage_CL, Rotation_CL, mirror_CL, BrightnessVariation_CL,
BrightnessVariationSpot_CL,
RotationRange_CL):
ctx_operation_CL = dash.callback_context
if not ctx_operation_CL.triggered:
button_id = 'Null'
else:
button_id = ctx_operation_CL.triggered[0]['prop_id'].split('.')[0]
print(button_id)
if button_id == 'Null':
raise PreventUpdate
else:
if button_id == 'operation_button_CL':
pre_process_param = run.pre_process_CL(ProjectName_CL, Runtime_CL, PretrainedModel_CL, ImWidth_CL,
ImHeight_CL,
ImChannel_CL,
BatchSize_CL, InitialLearningRate_CL, Momentum_CL, NumEpochs_CL,
ChangeLearningRateEpochs_CL, lr_change_CL, WeightPrior_CL,
class_penalty_CL, AugmentationPercentage_CL, Rotation_CL, mirror_CL,
BrightnessVariation_CL, BrightnessVariationSpot_CL,
RotationRange_CL)
DLModelHandle = pre_process_param[0][0]
DLDataset = pre_process_param[1][0]
TrainParam = pre_process_param[2][0]
run.training_CL(DLModelHandle, DLDataset, TrainParam)
metricList.append(DLModelHandle)
metricList.append(DLDataset)
metricList.append(TrainParam)
# run.training(templist[-3], templist[-2], templist[-1])
# run.training(templist[0], templist[1], templist[2])
else:
i = 1
# run.training(templist[-3], templist[-2], templist[-1])
return '', ''
@app.callback(Output('evaluation_graph_CL', 'figure'),
Output('Evaluation_loading_CL', 'children'),
Input('evaluation_button_CL', 'n_clicks'),
State('ProjectName_CL', 'value'),
State('Runtime_CL', 'value'),
State('PretrainedModel_CL', 'value'),
State('ImWidth_CL', 'value'),
State('ImHeight_CL', 'value'),
State('ImChannel_CL', 'value'),
State('BatchSize_CL', 'value'),
State('InitialLearningRate_CL', 'value'),
State('Momentum_CL', 'value'),
State('NumEpochs_CL', 'value'),
State('ChangeLearningRateEpochs_CL', 'value'),
State('lr_change_CL', 'value'),
State('WeightPrior_CL', 'value'),
State('class_penalty_CL', 'value'),
State('AugmentationPercentage_CL', 'value'),
State('Rotation_CL', 'value'),
State('mirror_CL', 'value'),
State('BrightnessVariation_CL', 'value'),
State('BrightnessVariationSpot_CL', 'value'),
# State('RotationRange', 'value'),
# State('IgnoreDirection', 'value'),
)
def evaluation_CL(evaluation_button_CL, ProjectName_CL, Runtime_CL, PretrainedModel_CL, ImWidth_CL, ImHeight_CL,
ImChannel_CL,
BatchSize_CL, InitialLearningRate_CL, Momentum_CL, NumEpochs_CL, ChangeLearningRateEpochs_CL,
lr_change_CL, WeightPrior_CL,
class_penalty_CL, AugmentationPercentage_CL, Rotation_CL, mirror_CL, BrightnessVariation_CL,
BrightnessVariationSpot_CL,
):
z = [[0, 0], [0, 0]]
x = ['Confusion Matrix', 'Confusion Matrix']
y = ['Confusion Matrix', 'Confusion Matrix']
# change each element of z to type string for annotations
z_text = [[str(y) for y in x] for x in z]
fig = ff.create_annotated_heatmap([[0, 0], [0, 0]], x=x, y=y, annotation_text=z_text, colorscale='Blues')
ctx_evaluation_CL = dash.callback_context
if not ctx_evaluation_CL.triggered:
button_id = 'Null'
else:
button_id = ctx_evaluation_CL.triggered[0]['prop_id'].split('.')[0]
if button_id == 'evaluation_button_CL':
print('Evaluation Started')
evaluationList = run.evaluation_CL(ProjectName_CL, Runtime_CL, PretrainedModel_CL, ImWidth_CL, ImHeight_CL,
ImChannel_CL,
BatchSize_CL, InitialLearningRate_CL, Momentum_CL, NumEpochs_CL,
ChangeLearningRateEpochs_CL,
lr_change_CL, WeightPrior_CL,
class_penalty_CL, AugmentationPercentage_CL, Rotation_CL, mirror_CL,
BrightnessVariation_CL,
BrightnessVariationSpot_CL,
)
z.clear()
x.clear()
y.clear()
z_text.clear()
confusion_matrix_List = evaluationList[0]
mean_precision = evaluationList[1][0]
mean_recall = evaluationList[2][0]
mean_f_score = evaluationList[3][0]
mean_precision = format(mean_precision, '.3f')
mean_recall = format(mean_recall, '.3f')
mean_f_score = format(mean_f_score, '.3f')
categories = run.getImageCategories(ProjectName_CL, 'Classification')[0]
labels = run.getImageCategories(ProjectName_CL, 'Classification')[1]
# threading.Thread(target=evaluation).start()
length = len(categories)
sublist = [confusion_matrix_List[i:i + length] for i in range(0, len(confusion_matrix_List), length)]
for i in sublist:
z.append(i)
for i in categories:
x.append(i)
y.append(i)
# change each element of z to type string for annotations
# z_text = [[str(y) for y in x] for x in z]
# set up figure
z_text = [[str(y) for y in x] for x in z]
fig = ff.create_annotated_heatmap(z, x=x, y=y, annotation_text=z_text, colorscale='Blues')
# add title
fig.update_layout(
title_text='Mean Precision: ' + str(mean_precision) + '\n Mean Recall: ' + str(
mean_recall) + '\n Mean F Score: ' + str(mean_f_score),
)
# add custom xaxis title
fig.add_annotation(dict(font=dict(color="black", size=14),
x=0.5,
y=-0.15,
showarrow=False,
text="Ground Truth",
xref="paper",
yref="paper"))
# add custom yaxis title
fig.add_annotation(dict(font=dict(color="black", size=14),
x=-0.1,
y=0.5,
showarrow=False,
text="Prediction",
textangle=-90,
xref="paper",
yref="paper"))
# adjust margins to make room for yaxis title
fig.update_layout(margin=dict(t=50, l=200))
# add colorbar
fig['data'][0]['showscale'] = True
return fig, ' '
# Historical method to produce json file of input parameters
@app.callback(Output('makeJson_CL', 'children'),
# Input('parameters_out_button', 'n_clicks'),
Input('ProjectName_CL', 'value'),
State('Runtime_CL', 'value'),
State('PretrainedModel_CL', 'value'),
State('ImWidth_CL', 'value'),
State('ImHeight_CL', 'value'),
State('ImChannel_CL', 'value'),
State('BatchSize_CL', 'value'),
State('InitialLearningRate_CL', 'value'),
State('Momentum_CL', 'value'),
State('NumEpochs_CL', 'value'),
State('ChangeLearningRateEpochs_CL', 'value'),
State('lr_change_CL', 'value'),
State('WeightPrior_CL', 'value'),
State('class_penalty_CL', 'value'),
State('AugmentationPercentage_CL', 'value'),
State('Rotation_CL', 'value'),
State('mirror_CL', 'value'),
State('BrightnessVariation_CL', 'value'),
State('BrightnessVariationSpot_CL', 'value'),
State('RotationRange_CL', 'value'),
# State('IgnoreDirection', 'value'),
# State('ClassIDsNoOrientationExist', 'value'),
# State('ClassIDsNoOrientation', 'value'),
)
def makeJson_CL(ProjectName_CL, Runtime_CL, PretrainedModel_CL, ImWidth_CL, ImHeight_CL, ImChannel_CL,
BatchSize_CL, InitialLearningRate_CL, Momentum_CL, NumEpochs_CL,
ChangeLearningRateEpochs_CL, lr_change_CL, WeightPrior_CL,
class_penalty_CL, AugmentationPercentage_CL, Rotation_CL, mirror_CL,
BrightnessVariation_CL, BrightnessVariationSpot_CL,
RotationRange_CL):
ParameterDict = {'ProjectName': ProjectName_CL,
'Runtime': Runtime_CL, 'PretrainedModel': PretrainedModel_CL, 'ImWidth': ImWidth_CL,
'ImHeight': ImHeight_CL,
'ImChannel': ImChannel_CL,
'BatchSize': BatchSize_CL, 'InitialLearningRate': InitialLearningRate_CL, 'Momentum': Momentum_CL,
'NumEpochs': NumEpochs_CL,
'ChangeLearningRateEpochs': ChangeLearningRateEpochs_CL, 'lr_change': lr_change_CL,
'WeightPrior': WeightPrior_CL,
'class_penalty': class_penalty_CL, 'AugmentationPercentage': AugmentationPercentage_CL,
'Rotation': Rotation_CL, 'mirror': mirror_CL,
'BrightnessVariation': BrightnessVariation_CL,
'BrightnessVariationSpot': BrightnessVariationSpot_CL,
'RotationRange': RotationRange_CL, }
ctx = dash.callback_context
if not ctx.triggered:
button_id = 'Null'
else:
button_id = ctx.triggered[0]['prop_id'].split('.')[0]
if button_id == 'parameters_out_button':
with open('parameters_json.txt', 'w') as outfile:
json.dump(ParameterDict, outfile)
return 'To json done!'
@app.callback(Output('metrics_CL', 'children'),
Input('interval_graph_CL', 'n_intervals'))
def update_metrics_CL(n):
# Indication Text configuration
# Extract data from Hdict and show as texts.
style = {'padding': '5px', 'fontSize': '16px'}
get_metrics = run.get_TrainInfo_CL()
if get_metrics:
time_elapsed = get_metrics[0]
time_remaining = get_metrics[1]
epoch_metrics = get_metrics[2]
else:
time_elapsed = 0
time_remaining = 0
epoch_metrics = 0
return [
html.Span('Time Elapsed: {}'.format(str(datetime.timedelta(seconds=int(time_elapsed)))), style=style),
html.Span('Time Remaining: {}'.format(time_remaining), style=style),
html.Span('Current Epoch: {}'.format(epoch_metrics), style=style)
]
# Multiple components can update everytime interval gets fired.
@app.callback(Output('iteration_loss_graph_CL', 'figure'),
Input('interval_graph_CL', 'n_intervals'))
def iteration_loss_graph_CL(n):
# Loss Graph configuration
# Using plotly subplots. May consider changing to others.
iteration_loss_graph_fig = plotly.tools.make_subplots(rows=1, cols=1, vertical_spacing=1)
iteration_loss_graph_fig['layout']['margin'] = {
'l': 80, 'r': 80, 'b': 50, 't': 80, 'autoexpand': False,
}
iteration_loss_graph_fig['layout']['legend'] = {'x': 0, 'y': 1, 'xanchor': 'left', 'title': 'Loss-Iteration Graph'}
    iteration_loss_graph_fig.update_layout(legend_title_text='Loss-Iteration Graph')
iteration_loss_graph_fig.update_xaxes(title_text="Iteration", row=1, col=1)
iteration_loss_graph_fig.update_yaxes(title_text="Loss", row=1, col=1)
# If Hdict files does not exist, clear graph and lists for plotting.
# Therefore, could reset graph by deleting the Hdict files.
getTrainInfo = run.get_TrainInfo_CL()
if not getTrainInfo:
iterationList.clear()
epochOfLossList.clear()
lossList.clear()
else:
epoch_TrainInfo = getTrainInfo[2]
loss = getTrainInfo[3]
iteration = getTrainInfo[4]
# Avoid duplicate output from Halcon.
# Interval for this web app is set to 1 sec. However feedback from Halcon may take up tp 5 secs.
# Using <in> with list, average time complexity: O(n)
# if iteration not in iterationList:
epochOfLossList.append(epoch_TrainInfo)
lossList.append(loss)
iterationList.append(iteration)
# Add the values to graph and start plotting.
iteration_loss_graph_fig.append_trace({
'x': epochOfLossList,
'y': lossList,
'text': iterationList,
'name': 'iteration vs loss',
'mode': 'lines',
'type': 'scatter'
}, 1, 1)
return iteration_loss_graph_fig
@app.callback(Output('top1_error_graph_CL', 'figure'),
Input('interval_graph_CL', 'n_intervals'))
def top1_error_graph_CL(n):
# Top1 Error Graph configuration.
# Using plotly subplots. May consider changing to others.
top1_error_graph_fig = plotly.tools.make_subplots(rows=1, cols=1, vertical_spacing=1, )
top1_error_graph_fig['layout']['margin'] = {
'l': 80, 'r': 80, 'b': 100, 't': 80, 'autoexpand': False,
}
top1_error_graph_fig['layout']['legend'] = {'x': 0, 'y': 1, 'xanchor': 'left'}
top1_error_graph_fig.update_xaxes(title_text="Epoch", row=1, col=1)
top1_error_graph_fig.update_yaxes(title_text="Top1 Error", row=1, col=1)
# If Hdict files does not exist, clear graph and lists for plotting.
# Therefore, could reset graph by deleting the Hdict files.
getEvaluationInfo = run.get_EvaluationInfo_CL()
if not getEvaluationInfo:
TrainSet_top1_error_valueList.clear()
ValidationSet_top1_error_valueList.clear()
epochOfTop1ErrorList.clear()
else:
epoch_EvaluationInfo = getEvaluationInfo[0]
TrainSet_top1_error_value = getEvaluationInfo[1]
ValidationSet_top1_error_value = getEvaluationInfo[2]
# Avoid duplicate output from Halcon.
# Interval for this web app is set to 1 sec. However feedback from Halcon may take up tp 5 secs.
# Using <in> with list, average time complexity: O(n)
if TrainSet_top1_error_value not in TrainSet_top1_error_valueList:
epochOfTop1ErrorList.append(epoch_EvaluationInfo)
TrainSet_top1_error_valueList.append(TrainSet_top1_error_value)
ValidationSet_top1_error_valueList.append(ValidationSet_top1_error_value)
# Add the values to graph and start plotting.
# Two plots on the same graph.
top1_error_graph_fig.append_trace({
'x': epochOfTop1ErrorList,
'y': TrainSet_top1_error_valueList,
'name': 'Train Set Top1_error',
'mode': 'lines+markers',
'type': 'scatter'
}, 1, 1)
top1_error_graph_fig.append_trace({
'x': epochOfTop1ErrorList,
'y': ValidationSet_top1_error_valueList,
'name': 'Validation Set Top1_error',
'mode': 'lines+markers',
'type': 'scatter'
}, 1, 1)
return top1_error_graph_fig
############################################ Object Detection ################################
@app.callback(Output('MinLevel_OD', 'children'),
Output('MaxLevel_OD', 'children'),
Output('AnchorNumSubscales_OD', 'children'),
Output('AnchorAspectRatios_OD', 'children'),
Output('Estimate_values_loading_OD', 'children'),
Input('estimate_button_OD', 'n_clicks'),
State('ImWidth_OD', 'value'),
State('ImHeight_OD', 'value'),
State('TrainingPercent_OD', 'value'),
State('ValidationPercent_OD', 'value'),
)
def estimate_value_OD(estimate_button_OD, ImWidth_OD, ImHeight_OD, TrainingPercent_OD, ValidationPercent_OD, ):
Label_data_OD = 'C:/Users/930415/Desktop/Chadle_Projects/Chadle_Data/Object_Detection/NTBW_Image Analytics/NTBW_Initial_2.hdict'
ctx_estimate_value_OD = dash.callback_context
if not ctx_estimate_value_OD.triggered:
button_id = 'Null'
else:
button_id = ctx_estimate_value_OD.triggered[0]['prop_id'].split('.')[0]
if button_id == 'estimate_button_OD':
estimate_value = run_OD.estimate_values_OD(ImWidth_OD, ImHeight_OD, TrainingPercent_OD,
ValidationPercent_OD, Label_data_OD)
DLDataset_preprocess = (estimate_value[0])
MinLevel_OD = (estimate_value[1])
MaxLevel_OD = (estimate_value[2])
AnchorNumSubscales_OD = (estimate_value[3])
estimate_value = [round(number, 3) for number in estimate_value[4]]
print(estimate_value)
AnchorAspectRatios_OD_String = ", ".join(str(number) for number in estimate_value)
AnchorAspectRatios_OD = AnchorAspectRatios_OD_String
return MinLevel_OD, MaxLevel_OD, AnchorNumSubscales_OD, AnchorAspectRatios_OD, ' '
else:
return ' ', ' ', ' ', ' ', ' '
@app.callback(Output('training_output_OD', 'children'),
Output('Training_loading_OD', 'children'),
Input('operation_button_OD', 'n_clicks'),
# State('ProjectName_OD', 'value'),
State('ImWidth_OD', 'value'),
State('ImHeight_OD', 'value'),
State('TrainingPercent_OD', 'value'),
State('ValidationPercent_OD', 'value'),
State('MinLevel_Input_OD', 'value'),
State('MaxLevel_Input_OD', 'value'),
State('AnchorNumSubscales_Input_OD', 'value'),
State('AnchorAspectRatios_Input_OD', 'value'),
State('ImChannel_OD', 'value'),
State('PretrainedModel_OD', 'value'),
State('InstanceType_OD', 'value'),
State('NumClasses_OD', 'value'),
State('Capacity_OD', 'value'),
State('AugmentationPercentage_OD', 'value'),
State('Rotation_OD', 'value'),
State('mirror_OD', 'value'),
State('BrightnessVariation_OD', 'value'),
State('BrightnessVariationSpot_OD', 'value'),
State('RotationRange_OD', 'value'),
State('BatchSize_OD', 'value'),
State('InitialLearningRate_OD', 'value'),
State('Momentum_OD', 'value'),
State('NumEpochs_OD', 'value'),
State('ChangeLearningRateEpochs_OD', 'value'),
State('lr_change_OD', 'value'),
State('WeightPrior_OD', 'value'),
State('class_penalty_OD', 'value'),
)
def operation_OD(operation_button_OD, ImWidth_OD, ImHeight_OD, TrainingPercent_OD, ValidationPercent_OD,
MinLevel_Input_OD, MaxLevel_Input_OD, AnchorNumSubscales_Input_OD, AnchorAspectRatios_Input_OD,
ImChannel_OD, PretrainedModel_OD, InstanceType_OD, NumClasses_OD, Capacity_OD,
AugmentationPercentage_OD, Rotation_OD, mirror_OD, BrightnessVariation_OD, BrightnessVariationSpot_OD,
RotationRange_OD, BatchSize_OD, InitialLearningRate_OD, Momentum_OD, NumEpochs_OD,
ChangeLearningRateEpochs_OD,
lr_change_OD, WeightPrior_OD, class_penalty_OD):
Label_data_OD = 'C:/Users/930415/Desktop/Chadle_Projects/Chadle_Data/Object_Detection/NTBW_Image Analytics/NTBW_Initial_2.hdict'
ctx_operation_OD = dash.callback_context
if not ctx_operation_OD.triggered:
button_id = 'Null'
else:
button_id = ctx_operation_OD.triggered[0]['prop_id'].split('.')[0]
if button_id == 'operation_button_OD':
estimate_value = run_OD.estimate_values_OD(ImWidth_OD, ImHeight_OD, TrainingPercent_OD,
ValidationPercent_OD, Label_data_OD)
DLDataset_preprocess = (estimate_value[0])
# If input empty, use Halcon estimate value.
if MinLevel_Input_OD:
MinLevel_OD = MinLevel_Input_OD
else:
MinLevel_OD = (estimate_value[1])
if MaxLevel_Input_OD:
MaxLevel_OD = MaxLevel_Input_OD
else:
MaxLevel_OD = (estimate_value[2])
if AnchorNumSubscales_Input_OD:
AnchorNumSubscales_OD = AnchorNumSubscales_Input_OD
else:
AnchorNumSubscales_OD = (estimate_value[3])
if AnchorAspectRatios_Input_OD:
AnchorAspectRatios_OD = AnchorAspectRatios_Input_OD.split(',')
else:
AnchorAspectRatios_OD = (estimate_value[4])
print(ImChannel_OD)
preprocess_OD = run_OD.preprocess_OD(ImWidth_OD, ImHeight_OD, ImChannel_OD, TrainingPercent_OD,
ValidationPercent_OD, Label_data_OD,
PretrainedModel_OD,
InstanceType_OD, DLDataset_preprocess,
MinLevel_OD, MaxLevel_OD,
AnchorNumSubscales_OD, AnchorAspectRatios_OD, NumClasses_OD, Capacity_OD)
DLDatasetFileName = preprocess_OD[0]
DLPreprocessParamFileName = preprocess_OD[1]
ModelFileName = preprocess_OD[2]
prepare_for_training_OD = run_OD.prepare_for_training_OD(AugmentationPercentage_OD, Rotation_OD, mirror_OD,
BrightnessVariation_OD, BrightnessVariationSpot_OD,
RotationRange_OD, BatchSize_OD,
InitialLearningRate_OD, Momentum_OD, NumEpochs_OD,
ChangeLearningRateEpochs_OD,
lr_change_OD, WeightPrior_OD, class_penalty_OD,
DLDatasetFileName, DLPreprocessParamFileName,
ModelFileName)
DLModelHandle = prepare_for_training_OD[0][0]
DLDataset = prepare_for_training_OD[1][0]
TrainParam = prepare_for_training_OD[2][0]
# Training
training_OD = run_OD.training_OD(DLDataset, DLModelHandle, TrainParam)
return ' ', ' '
# OD metrics and graphs
@app.callback(Output('metrics_OD', 'children'),
Input('interval_graph_OD', 'n_intervals'))
def update_metrics_OD(n):
# Indication Text configuration
# Extract data from Hdict and show as texts.
style = {'padding': '5px', 'fontSize': '16px'}
get_metrics = run_OD.get_TrainInfo_OD()
if get_metrics:
time_elapsed = get_metrics[0]
time_remaining = get_metrics[1]
epoch_metrics = get_metrics[2]
else:
time_elapsed = 0
time_remaining = 0
epoch_metrics = 0
return [
html.Span('Time Elapsed: {}'.format(str(datetime.timedelta(seconds=int(time_elapsed)))), style=style),
html.Span('Time Remaining: {}'.format(time_remaining), style=style),
html.Span('Current Epoch: {}'.format(epoch_metrics), style=style)
]
# Multiple components can update every time the interval gets fired.
@app.callback(Output('iteration_loss_graph_OD', 'figure'),
Input('interval_graph_OD', 'n_intervals'))
def iteration_loss_graph_OD(n):
# Loss Graph configuration
# Using plotly subplots. May consider changing to others.
iteration_loss_graph_fig = plotly.tools.make_subplots(rows=1, cols=1, vertical_spacing=1)
iteration_loss_graph_fig['layout']['margin'] = {
'l': 80, 'r': 80, 'b': 50, 't': 80, 'autoexpand': False,
}
iteration_loss_graph_fig['layout']['legend'] = {'x': 0, 'y': 1, 'xanchor': 'left', 'title': 'Loss-Iteration Graph'}
    iteration_loss_graph_fig.update_layout(legend_title_text="Loss-Iteration Graph")
iteration_loss_graph_fig.update_xaxes(title_text="Iteration", row=1, col=1)
iteration_loss_graph_fig.update_yaxes(title_text="Loss", row=1, col=1)
    # If the Hdict files do not exist, clear the graph and the lists used for plotting.
    # The graph can therefore be reset by deleting the Hdict files.
getTrainInfo = run_OD.get_TrainInfo_OD()
if not getTrainInfo:
iterationList.clear()
epochOfLossList.clear()
lossList.clear()
else:
epoch_TrainInfo = getTrainInfo[2]
loss = getTrainInfo[3]
iteration = getTrainInfo[4]
# Avoid duplicate output from Halcon.
        # Interval for this web app is set to 1 sec. However, feedback from Halcon may take up to 5 secs.
# Using <in> with list, average time complexity: O(n)
if iteration not in iterationList:
epochOfLossList.append(epoch_TrainInfo)
lossList.append(loss)
iterationList.append(iteration)
# Add the values to graph and start plotting.
iteration_loss_graph_fig.append_trace({
'x': epochOfLossList,
'y': lossList,
'text': iterationList,
'name': 'iteration vs loss',
'mode': 'lines',
'type': 'scatter'
}, 1, 1)
return iteration_loss_graph_fig
@app.callback(Output('mean_ap_graph_OD', 'figure'),
Input('interval_graph_OD', 'n_intervals'))
def mean_ap_graph_OD(n):
# Mean AP Graph configuration.
# Using plotly subplots. May consider changing to others.
mean_ap_graph_fig = plotly.tools.make_subplots(rows=1, cols=1, vertical_spacing=1, )
mean_ap_graph_fig['layout']['margin'] = {
'l': 80, 'r': 80, 'b': 100, 't': 80, 'autoexpand': False,
}
mean_ap_graph_fig['layout']['legend'] = {'x': 0, 'y': 1, 'xanchor': 'left'}
mean_ap_graph_fig.update_xaxes(title_text="Epoch", row=1, col=1)
    mean_ap_graph_fig.update_yaxes(title_text="Mean AP", row=1, col=1)
    # If the Hdict files do not exist, clear the graph and the lists used for plotting.
    # The graph can therefore be reset by deleting the Hdict files.
getEvaluationInfo = run_OD.get_EvaluationInfo_OD()
if not getEvaluationInfo:
TrainSet_mean_ap_valueList.clear()
ValidationSet_mean_ap_valueList.clear()
epochOfMeanAPList.clear()
else:
epoch_EvaluationInfo = getEvaluationInfo[0]
TrainSet_mean_ap_value = getEvaluationInfo[1]
ValidationSet_mean_ap_value = getEvaluationInfo[2]
# Avoid duplicate output from Halcon.
        # Interval for this web app is set to 1 sec. However, feedback from Halcon may take up to 5 secs.
# Using <in> with list, average time complexity: O(n)
# if TrainSet_mean_ap_value not in TrainSet_mean_ap_valueList:
epochOfMeanAPList.append(epoch_EvaluationInfo)
TrainSet_mean_ap_valueList.append(TrainSet_mean_ap_value)
ValidationSet_mean_ap_valueList.append(ValidationSet_mean_ap_value)
# Add the values to graph and start plotting.
# Two plots on the same graph.
mean_ap_graph_fig.append_trace({
'x': epochOfMeanAPList,
'y': TrainSet_mean_ap_valueList,
        'name': 'Train Set Mean AP',
'mode': 'lines+markers',
'type': 'scatter'
}, 1, 1)
mean_ap_graph_fig.append_trace({
'x': epochOfMeanAPList,
'y': ValidationSet_mean_ap_valueList,
        'name': 'Validation Set Mean AP',
'mode': 'lines+markers',
'type': 'scatter'
}, 1, 1)
return mean_ap_graph_fig
if __name__ == '__main__':
app.run_server(debug=True)
|
the-stack_0_15048 | # Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from datetime import date
from datetime import datetime
from freezegun import freeze_time
from mock import patch
from ggrc.app import db
from ggrc.notifications import common
from ggrc.models import Notification
from ggrc.models import Person
from ggrc.models import all_models
from ggrc_workflows.models import Cycle
from integration.ggrc import TestCase
from integration.ggrc.access_control import acl_helper
from integration.ggrc.api_helper import Api
from integration.ggrc.generator import ObjectGenerator
from integration.ggrc_workflows.generator import WorkflowsGenerator
class TestOneTimeWorkflowNotification(TestCase):
""" This class contains simple one time workflow tests that are not
in the gsheet test grid
"""
def setUp(self):
super(TestOneTimeWorkflowNotification, self).setUp()
self.api = Api()
self.wf_generator = WorkflowsGenerator()
self.object_generator = ObjectGenerator()
self.random_objects = self.object_generator.generate_random_objects()
self.random_people = self.object_generator.generate_random_people(
user_role="Administrator"
)
self.create_test_cases()
def init_decorator(init):
def new_init(self, *args, **kwargs):
init(self, *args, **kwargs)
if hasattr(self, "created_at"):
self.created_at = datetime.now()
return new_init
Notification.__init__ = init_decorator(Notification.__init__)
def test_one_time_wf_activate(self):
def get_person(person_id):
return db.session.query(Person).filter(Person.id == person_id).one()
with freeze_time("2015-04-10"):
_, wf = self.wf_generator.generate_workflow(self.one_time_workflow_1)
_, cycle = self.wf_generator.generate_cycle(wf)
self.wf_generator.activate_workflow(wf)
person_2 = get_person(self.random_people[2].id)
with freeze_time("2015-04-11"):
_, notif_data = common.get_daily_notifications()
self.assertIn(person_2.email, notif_data)
self.assertIn("cycle_started", notif_data[person_2.email])
self.assertIn(cycle.id, notif_data[person_2.email]["cycle_started"])
self.assertIn("my_tasks",
notif_data[person_2.email]["cycle_data"][cycle.id])
person_1 = get_person(self.random_people[0].id)
with freeze_time("2015-05-03"): # two days befor due date
_, notif_data = common.get_daily_notifications()
self.assertIn(person_1.email, notif_data)
self.assertNotIn("due_in", notif_data[person_1.email])
self.assertNotIn("due_today", notif_data[person_1.email])
with freeze_time("2015-05-04"): # one day befor due date
_, notif_data = common.get_daily_notifications()
self.assertEqual(len(notif_data[person_1.email]["due_in"]), 1)
with freeze_time("2015-05-05"): # due date
_, notif_data = common.get_daily_notifications()
self.assertEqual(len(notif_data[person_1.email]["due_today"]), 1)
@patch("ggrc.notifications.common.send_email")
def test_one_time_wf_activate_single_person(self, mock_mail):
with freeze_time("2015-04-10"):
user = "[email protected]"
_, wf = self.wf_generator.generate_workflow(
self.one_time_workflow_single_person)
_, cycle = self.wf_generator.generate_cycle(wf)
self.wf_generator.activate_workflow(wf)
with freeze_time("2015-04-11"):
_, notif_data = common.get_daily_notifications()
self.assertIn("cycle_started", notif_data[user])
self.assertIn(cycle.id, notif_data[user]["cycle_started"])
self.assertIn("my_tasks", notif_data[user]["cycle_data"][cycle.id])
self.assertIn("cycle_tasks", notif_data[user]["cycle_data"][cycle.id])
self.assertIn(
"my_task_groups", notif_data[user]["cycle_data"][cycle.id])
self.assertIn("cycle_url", notif_data[user]["cycle_started"][cycle.id])
cycle = Cycle.query.get(cycle.id)
cycle_data = notif_data[user]["cycle_data"][cycle.id]
for task in cycle.cycle_task_group_object_tasks:
self.assertIn(task.id, cycle_data["my_tasks"])
self.assertIn(task.id, cycle_data["cycle_tasks"])
self.assertIn("title", cycle_data["my_tasks"][task.id])
self.assertIn("title", cycle_data["cycle_tasks"][task.id])
self.assertIn("cycle_task_url", cycle_data["cycle_tasks"][task.id])
with freeze_time("2015-05-03"): # two days before due date
_, notif_data = common.get_daily_notifications()
self.assertIn(user, notif_data)
self.assertNotIn("due_in", notif_data[user])
self.assertNotIn("due_today", notif_data[user])
with freeze_time("2015-05-04"): # one day before due date
_, notif_data = common.get_daily_notifications()
self.assertEqual(len(notif_data[user]["due_in"]), 2)
with freeze_time("2015-05-05"): # due date
_, notif_data = common.get_daily_notifications()
self.assertEqual(len(notif_data[user]["due_today"]), 2)
common.send_daily_digest_notifications()
self.assertEqual(mock_mail.call_count, 1)
def create_test_cases(self):
def person_dict(person_id):
return {
"href": "/api/people/%d" % person_id,
"id": person_id,
"type": "Person"
}
role_id = all_models.AccessControlRole.query.filter(
all_models.AccessControlRole.name == "Task Assignees",
all_models.AccessControlRole.object_type == "TaskGroupTask",
).one().id
self.one_time_workflow_1 = {
"title": "one time test workflow",
"description": "some test workflow",
"notify_on_change": True,
# admin will be current user with id == 1
"task_groups": [{
"title": "one time task group",
"contact": person_dict(self.random_people[2].id),
"task_group_tasks": [{
"title": "task 1",
"description": "some task",
"access_control_list": [
acl_helper.get_acl_json(role_id, self.random_people[0].id)
],
"start_date": date(2015, 5, 1), # friday
"end_date": date(2015, 5, 5),
}, {
"title": "task 2",
"description": "some task",
"access_control_list": [
acl_helper.get_acl_json(role_id, self.random_people[1].id)
],
"start_date": date(2015, 5, 4),
"end_date": date(2015, 5, 7),
}],
"task_group_objects": self.random_objects[:2]
}, {
"title": "another one time task group",
"contact": person_dict(self.random_people[2].id),
"task_group_tasks": [{
"title": "task 1 in tg 2",
"description": "some task",
"access_control_list": [
acl_helper.get_acl_json(role_id, self.random_people[0].id)
],
"start_date": date(2015, 5, 8), # friday
"end_date": date(2015, 5, 12),
}, {
"title": "task 2 in tg 2",
"description": "some task",
"access_control_list": [
acl_helper.get_acl_json(role_id, self.random_people[2].id)
],
"start_date": date(2015, 5, 1), # friday
"end_date": date(2015, 5, 5),
}],
"task_group_objects": []
}]
}
user = Person.query.filter(Person.email == "[email protected]").one().id
self.one_time_workflow_single_person = {
"title": "one time test workflow",
"notify_on_change": True,
"description": "some test workflow",
# admin will be current user with id == 1
"task_groups": [{
"title": "one time task group",
"contact": person_dict(user),
"task_group_tasks": [{
"title": u"task 1 \u2062 WITH AN UMBRELLA ELLA ELLA. \u2062",
"description": "some task. ",
"access_control_list": [
acl_helper.get_acl_json(role_id, user)
],
"start_date": date(2015, 5, 1), # friday
"end_date": date(2015, 5, 5),
}, {
"title": "task 2",
"description": "some task",
"access_control_list": [
acl_helper.get_acl_json(role_id, user)
],
"start_date": date(2015, 5, 4),
"end_date": date(2015, 5, 7),
}],
"task_group_objects": self.random_objects[:2]
}, {
"title": "another one time task group",
"contact": person_dict(user),
"task_group_tasks": [{
"title": u"task 1 \u2062 WITH AN UMBRELLA ELLA ELLA. \u2062",
"description": "some task",
"access_control_list": [
acl_helper.get_acl_json(role_id, user)
],
"start_date": date(2015, 5, 8), # friday
"end_date": date(2015, 5, 12),
}, {
"title": "task 2 in tg 2",
"description": "some task",
"access_control_list": [
acl_helper.get_acl_json(role_id, user)
],
"start_date": date(2015, 5, 1), # friday
"end_date": date(2015, 5, 5),
}],
"task_group_objects": []
}]
}
|
the-stack_0_15049 | #!/usr/bin/env python3
import argparse
import logging
from db_test_meter.database import Database
from db_test_meter.util import init_logger, collect_user_input, AppConfig
def create_db(db: Database) -> None:
"""
Utility to create the db and table for the sync check
    :param db: Database wrapper used to run the setup queries
    :return: None
"""
try:
log.debug(f'creating database {AppConfig.TEST_DB_NAME}')
db.run_query(f"DROP DATABASE IF EXISTS {AppConfig.TEST_DB_NAME}")
db.run_query(f"CREATE DATABASE IF NOT EXISTS {AppConfig.TEST_DB_NAME}")
log.debug(f'creating table {AppConfig.TEST_DB_TABLE}')
db.run_query(
f"CREATE TABLE {AppConfig.TEST_DB_NAME}.{AppConfig.TEST_DB_TABLE} (`test_run_id` varchar(50) NOT NULL, `index_id` int(10) unsigned NOT NULL, `created` int(8) NOT NULL)")
print(f'Database {AppConfig.TEST_DB_NAME} created')
print(f'Table {AppConfig.TEST_DB_NAME}.{AppConfig.TEST_DB_TABLE} created')
except Exception as e:
print(f'There was an error: {e}')
parser = argparse.ArgumentParser(
'simple utility to create the db and table used by failover_test.py. Usage: ./create_failover_sync_db.py')
parser.add_argument('--debug', action='store_true')
init_logger(debug=parser.parse_args().debug)
log = logging.getLogger()
print('This will destroy and recreate sync database and tracking table')
if (input("enter y to continue, n to exit [n]: ") or 'n').lower() == 'y':
db_connection_metadata = collect_user_input()
db = Database(db_connection_metadata)
create_db(db)
else:
print('exiting...')
|
the-stack_0_15051 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1. / np.sqrt(fan_in)
return -lim, lim
class Actor(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=400, fc2_units=300, batch_norm=True):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(Actor, self).__init__()
self.seed = torch.manual_seed(seed)
self.batch_norm = batch_norm
self.fc1 = nn.Linear(state_size, fc1_units)
if batch_norm:
self.bn1 = nn.BatchNorm1d(fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
if batch_norm:
self.bn2 = nn.BatchNorm1d(fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
def forward(self, state):
"""Build an actor (policy) network that maps states -> actions."""
if self.batch_norm:
x = F.relu(self.bn1(self.fc1(state)))
x = F.relu(self.bn2(self.fc2(x)))
else:
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
return torch.tanh(self.fc3(x))
class Critic(nn.Module):
"""Critic (Value) Model."""
def __init__(self, state_size, action_size, seed, fcs1_units=400, fc2_units=300, batch_norm=True):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fcs1_units (int): Number of nodes in the first hidden layer
fc2_units (int): Number of nodes in the second hidden layer
"""
super(Critic, self).__init__()
self.seed = torch.manual_seed(seed)
self.batch_norm = batch_norm
self.fcs1 = nn.Linear(state_size, fcs1_units)
self.bn1 = nn.BatchNorm1d(fcs1_units)
self.fc2 = nn.Linear(fcs1_units+action_size, fc2_units)
self.fc3 = nn.Linear(fc2_units, 1)
self.reset_parameters()
def reset_parameters(self):
self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
def forward(self, state, action):
"""Build a critic (value) network that maps (state, action) pairs -> Q-values."""
if self.batch_norm:
xs = F.relu(self.bn1(self.fcs1(state)))
else:
xs = F.relu(self.fcs1(state))
x = torch.cat((xs, action), dim=1)
x = F.relu(self.fc2(x))
return self.fc3(x)
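# Hedged usage sketch (not part of the original file): wires the Actor and
# Critic together the way a DDPG-style agent would; the state/action sizes
# below are illustrative assumptions, not values from the original project.
if __name__ == "__main__":
    state_size, action_size, seed = 33, 4, 0
    actor = Actor(state_size, action_size, seed)
    critic = Critic(state_size, action_size, seed)
    # eval mode so BatchNorm1d accepts a single-sample batch
    actor.eval()
    critic.eval()
    state = torch.randn(1, state_size)
    action = actor(state)            # tanh keeps actions in [-1, 1]
    q_value = critic(state, action)  # scalar Q estimate for the pair
    print(action.shape, q_value.shape)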
|
the-stack_0_15052 | from functools import reduce
from operator import __mul__
import pytest
from treevalue.tree import func_treelize, TreeValue, method_treelize, classmethod_treelize, delayed
# noinspection DuplicatedCode
@pytest.mark.unittest
class TestTreeFuncFunc:
def test_tree_value_type(self):
class _MyTreeValue(TreeValue):
pass
@func_treelize(return_type=_MyTreeValue)
def ssum(*args):
return sum(args)
t1 = TreeValue({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
t2 = TreeValue({'a': 11, 'b': 22, 'x': {'c': 33, 'd': 44}})
tr1 = ssum(t1, t2)
assert tr1 != TreeValue({'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}})
assert tr1 == _MyTreeValue({'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}})
assert isinstance(tr1, _MyTreeValue)
assert isinstance(tr1.x, _MyTreeValue)
@func_treelize(return_type=_MyTreeValue)
def ssum2(*args):
return sum(args), reduce(__mul__, args, 1)
tr2 = ssum2(t1, t2)
assert tr2 == _MyTreeValue({'a': (12, 11), 'b': (24, 44), 'x': {'c': (36, 99), 'd': (48, 176)}})
@func_treelize(return_type=_MyTreeValue, rise=True)
def ssum3(*args):
return sum(args), reduce(__mul__, args, 1)
tr3, tr4 = ssum3(t1, t2)
assert tr3 == _MyTreeValue({'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}})
assert tr4 == _MyTreeValue({'a': 11, 'b': 44, 'x': {'c': 99, 'd': 176}})
@func_treelize(return_type=_MyTreeValue, subside=True, rise=dict(template=(None, None)))
def ssum4(args):
return sum(args), reduce(__mul__, args, 1)
tr5, tr6 = ssum4([t1, t2])
assert tr5 == _MyTreeValue({'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}})
assert tr6 == _MyTreeValue({'a': 11, 'b': 44, 'x': {'c': 99, 'd': 176}})
@func_treelize()
def ssum5(a, b, c):
return a + b * c
t3 = TreeValue({'a': 31, 'b': 12, 'x': {'c': 43, 'd': 24}})
assert ssum5(1, c=3, b=5) == 16
assert ssum5(t2, c=t1, b=t3) == TreeValue({
'a': 42,
'b': 46,
'x': {
'c': 162,
'd': 140,
}
})
assert ssum5(t2, c=2, b=t3) == TreeValue({
'a': 73,
'b': 46,
'x': {
'c': 119,
'd': 92,
}
})
@func_treelize('outer', missing=lambda: 1)
def ssum6(a, b, c):
return a + b * c
t4 = TreeValue({'a': 31, 'b': 12, 'x': {'c': 43}})
with pytest.raises(KeyError):
ssum5(t2, c=2, b=t4)
assert ssum6(t2, c=2, b=t4) == TreeValue({
'a': 73,
'b': 46,
'x': {
'c': 119,
'd': 46,
}
})
@func_treelize('left')
def ssum7(a, b, c):
return a + b * c
with pytest.raises(KeyError):
ssum7(t2, c=2, b=t4)
@func_treelize(inherit=False)
def ssum8(a, b, c):
return a + b * c
with pytest.raises(TypeError):
ssum8(t2, c=2, b=t1)
def test_tree_value_type_none(self):
@func_treelize(return_type=None)
def ssum(*args):
return sum(args)
t1 = TreeValue({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
t2 = TreeValue({'a': 11, 'b': 22, 'x': {'c': 33, 'd': 44}})
tr1 = ssum(t1, t2)
assert tr1 is None
def test_tree_value_type_invalid(self):
class _MyTreeValue:
pass
with pytest.raises(TypeError):
# noinspection PyTypeChecker
@func_treelize(return_type=_MyTreeValue)
def ssum(*args):
return sum(args)
with pytest.raises(TypeError):
# noinspection PyTypeChecker
@func_treelize(return_type=233)
def ssum(*args):
return sum(args)
def test_method_treelize(self):
class TreeNumber(TreeValue):
@method_treelize()
def _attr_extern(self, key):
return getattr(self, key)
@method_treelize('outer', missing=0)
def __add__(self, other):
return self + other
@method_treelize('outer', missing=0)
def __radd__(self, other):
return other + self
@method_treelize('outer', missing=0)
def __sub__(self, other):
return self - other
@method_treelize('outer', missing=0)
def __rsub__(self, other):
return other - self
@method_treelize()
def __pos__(self):
return +self
@method_treelize()
def __neg__(self):
return -self
@method_treelize()
def __call__(self, *args, **kwargs):
return self(*args, **kwargs)
@method_treelize(return_type=TreeValue)
def damn_it(self, x):
return self + x
t1 = TreeNumber({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
t2 = TreeNumber({'a': 11, 'b': 22, 'x': {'c': 33, 'd': 5}})
assert (t1 + t2 + 1) == TreeNumber({'a': 13, 'b': 25, 'x': {'c': 37, 'd': 10}})
assert (t1 - t2) == TreeNumber({'a': -10, 'b': -20, 'x': {'c': -30, 'd': -1}})
assert (1 - t2) == TreeNumber({'a': -10, 'b': -21, 'x': {'c': -32, 'd': -4}})
assert t1.damn_it(2) == TreeValue({'a': 3, 'b': 4, 'x': {'c': 5, 'd': 6}})
class P:
def __init__(self, value):
self.__value = value
@property
def value(self):
return self.__value
def vv(self):
return self.__value + 1
ttt = TreeNumber({"a": P(1), "b": P(2), "x": {"c": P(3), "d": P(4)}})
assert ttt.value == TreeNumber({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
assert ttt.vv() == TreeNumber({'a': 2, 'b': 3, 'x': {'c': 4, 'd': 5}})
with pytest.warns(UserWarning):
class MyTreeValue(TreeValue):
@method_treelize(self_copy=True, rise=True)
def __iadd__(self, other):
return self + other
def test_classmethod_treelize(self):
class TestUtils:
@classmethod
@classmethod_treelize('outer', missing=0, return_type=TreeValue)
def add(cls, a, b):
return cls, a + b
@classmethod
@classmethod_treelize(return_type=TreeValue)
def add2(cls, a, b):
return cls, a + b
assert TestUtils.add(1, 2) == (TestUtils, 3)
assert TestUtils.add(TreeValue({'a': 1, 'b': 2}), 2) == TreeValue({'a': (TestUtils, 3), 'b': (TestUtils, 4)})
assert TestUtils.add2(TreeValue({'a': 1, 'b': 2}), TreeValue({'a': 12, 'b': 22})) == TreeValue(
{'a': (TestUtils, 13), 'b': (TestUtils, 24)})
class MyTreeValue(TreeValue):
@classmethod
@classmethod_treelize()
def plus(cls, x, y):
return x + y
assert MyTreeValue.plus(TreeValue({'a': 1, 'b': 2}), 2) == MyTreeValue({'a': 3, 'b': 4})
def test_missing(self):
@func_treelize(mode='outer', missing=lambda: [])
def append(arr: list, *args):
for item in args:
if item:
arr.append(item)
return arr
t0 = TreeValue({})
t1 = TreeValue({'a': 2, 'b': 7, 'x': {'c': 4, 'd': 9}})
t2 = TreeValue({'a': 4, 'b': 48, 'x': {'c': -11, 'd': 54}})
t3 = TreeValue({'a': 9, 'b': -12, 'x': {'c': 3, 'd': 4}})
assert append(t0, t1, t2, t3) == TreeValue({
'a': [2, 4, 9],
'b': [7, 48, -12],
'x': {
'c': [4, -11, 3],
'd': [9, 54, 4],
}
})
t0 = TreeValue({})
t1 = TreeValue({'a': 2, 'x': {'c': 4, 'd': 9}})
t2 = TreeValue({'a': 4, 'b': 48, 'x': {'d': 54}})
t3 = TreeValue({'b': -12, 'x': 7, 'y': {'e': 3, 'f': 4}})
assert append(t0, t1, t2, t3) == TreeValue({
'a': [2, 4],
'b': [48, -12],
'x': {
'c': [4, 7],
'd': [9, 54, 7],
},
'y': {
'e': [3],
'f': [4],
},
})
def test_delay_support(self):
@func_treelize(return_type=TreeValue)
def f(x, y, z):
return x + y * 2 + z * 3
t1 = TreeValue({
'a': 1,
'b': delayed(lambda x: x ** 2, 3),
'c': {'x': 2, 'y': delayed(lambda: 4)},
})
t2 = TreeValue({
'a': delayed(lambda x: x + 1, t1.a),
'b': delayed(lambda: t1.c.y),
'c': delayed(lambda: 5),
})
t3 = delayed(lambda: 6)
assert f(t1, t2, t3) == TreeValue({
'a': 23, 'b': 35,
'c': {'x': 30, 'y': 32},
})
t1 = TreeValue({
'a': 1,
'b': delayed(lambda x: x ** 2, 3),
'c': {'x': 2, 'y': delayed(lambda: 4)},
})
t2 = TreeValue({
'a': delayed(lambda x: x + 1, t1.a),
'b': delayed(lambda: t1.c.y),
'c': delayed(lambda: 5),
})
t3 = delayed(lambda: 6)
assert f(x=t1, y=t2, z=t3) == TreeValue({
'a': 23, 'b': 35,
'c': {'x': 30, 'y': 32},
})
def test_delayed_treelize(self):
t1 = TreeValue({
'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4},
})
t2 = TreeValue({
'a': 11, 'b': 23, 'x': {'c': 35, 'd': 47},
})
cnt_1 = 0
@func_treelize(delayed=True)
def total(a, b):
nonlocal cnt_1
cnt_1 += 1
return a + b
# positional
t3 = total(t1, t2)
assert cnt_1 == 0
assert t3.a == 12
assert cnt_1 == 1
assert t3.x == TreeValue({'c': 38, 'd': 51})
assert cnt_1 == 3
assert t3 == TreeValue({
'a': 12, 'b': 25, 'x': {'c': 38, 'd': 51}
})
assert cnt_1 == 4
# keyword
cnt_1 = 0
t3 = total(a=t1, b=t2)
assert cnt_1 == 0
assert t3.a == 12
assert cnt_1 == 1
assert t3.x == TreeValue({'c': 38, 'd': 51})
assert cnt_1 == 3
assert t3 == TreeValue({
'a': 12, 'b': 25, 'x': {'c': 38, 'd': 51}
})
assert cnt_1 == 4
# positional, with constant
cnt_1 = 0
t3 = total(1, t2)
assert cnt_1 == 0
assert t3.a == 12
assert cnt_1 == 1
assert t3.x == TreeValue({'c': 36, 'd': 48})
assert cnt_1 == 3
assert t3 == TreeValue({
'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}
})
assert cnt_1 == 4
# keyword, with constant
cnt_1 = 0
t3 = total(b=1, a=t2)
assert cnt_1 == 0
assert t3.a == 12
assert cnt_1 == 1
assert t3.x == TreeValue({'c': 36, 'd': 48})
assert cnt_1 == 3
assert t3 == TreeValue({
'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}
})
assert cnt_1 == 4
# positional, with delay
cnt_1 = 0
t4 = TreeValue({'v': delayed(lambda: t1)})
t5 = TreeValue({'v': delayed(lambda: t2)})
t6 = total(t4, t5)
assert cnt_1 == 0
assert t6.v.a == 12
assert cnt_1 == 1
assert t6.v.x == TreeValue({'c': 38, 'd': 51})
assert cnt_1 == 3
assert t6 == TreeValue({
'v': {'a': 12, 'b': 25, 'x': {'c': 38, 'd': 51}},
})
assert cnt_1 == 4
# keyword, with delay
cnt_1 = 0
t4 = TreeValue({'v': delayed(lambda: t1)})
t5 = TreeValue({'v': delayed(lambda: t2)})
t6 = total(a=t4, b=t5)
assert cnt_1 == 0
assert t6.v.a == 12
assert cnt_1 == 1
assert t6.v.x == TreeValue({'c': 38, 'd': 51})
assert cnt_1 == 3
assert t6 == TreeValue({
'v': {'a': 12, 'b': 25, 'x': {'c': 38, 'd': 51}},
})
assert cnt_1 == 4
|
the-stack_0_15054 | __author__ = 'Simon'
import json
import re
import urllib.request
import html
fnc_base_url = 'http://cbateam.github.io/CBA_A3/docs/index/'
fnc_page_names = ['Functions', 'Functions2', 'Functions3']
fncPrefix = 'CBA_fnc_'
macro_base_url = 'http://cbateam.github.io/CBA_A3/docs/files/main/script_macros_common-hpp.html'
f = urllib.request.urlopen(macro_base_url)
content = f.read().decode("utf-8")
f.close()
allMacros = re.findall(r'<div class=CTopic><h3 class=CTitle><a name="[^"]*"></a>([^<]*)</h3><div class=CBody>(.*?)</div>',content,re.DOTALL)
for macroContent in allMacros:
c = re.search(r'(.*?)(<h4 class=CHeading>Parameters</h4>.*?)?(<h4 class=CHeading>Example</h4>.*?)?(?:<h4 class=CHeading>Author</h4>.*?)',macroContent[1],re.DOTALL)
if c:
description = c.group(1)
descriptionTable = re.findall(r'<tr><td class=CDLEntry>(.*?)</td><td class=CDLDescription>(.*?)</td></tr>',macroContent[1],re.DOTALL)
for tableEntry in descriptionTable:
re.search(r'(.*?)(<h4 class=CHeading>Parameters</h4>.*?)?(<h4 class=CHeading>Example</h4>.*?)?(?:<h4 class=CHeading>Author</h4>.*?)',macroContent[1],re.DOTALL)
output = []
functionList = []
for fnc_page in fnc_page_names:
f = urllib.request.urlopen(fnc_base_url + fnc_page + '.html')
content = f.read().decode("utf-8")
f.close()
allFunctions = re.findall(r'<a[^>]*href\s*=\s*"([^"]*)"[^>]*class=ISymbol[^>]*>([^<]*)',content)
for function in allFunctions:
outputTemplate = {}
outputTemplate['rightLabel'] = "CBA Function"
outputTemplate['text'] = ''
outputTemplate['description'] = ''
outputTemplate['type'] = 'function'
outputTemplate['descriptionMoreURL'] = fnc_base_url + function[0]
print(function[1])
functionList.append(function[1])
f = urllib.request.urlopen(outputTemplate['descriptionMoreURL'])
content = f.read().decode("utf-8")
f.close()
nameRegex = re.search(r'<a name="([^"]*)">',content)
if nameRegex:
outputTemplate['text'] = nameRegex.group(1)
descriptionRegex = re.search(r'<h4 class=CHeading>Description</h4>(.*)<h4 class=CHeading>Parameters</h4>',content)
if descriptionRegex:
outputTemplate['description'] = str(html.unescape(re.sub(r'(<[^<]+?>)','',descriptionRegex.group(1)).strip()))
output.append(outputTemplate)
autocompleteDict = {
'.source.sqf': {
'autocomplete': {
'symbols':{
'CBAfunctions':{
'suggestions': output
}
}
}
}
}
with open('../settingsAvailable/language-sqf-functions-cba.json', 'w') as f:
json.dump(autocompleteDict,f,indent=2)
with open('grammars-sqf-functions-cba.json', 'w') as f:
f.write('|'.join(functionList))
print("\nCopy contents of 'grammars-sqf-functions-cba.json' into the 'support.function.cba.sqf' section of 'grammars/sqf.json'")
|
the-stack_0_15056 | """
Post-processing function that takes a case_data_set and outputs a csv file
"""
import csv
# pylint: disable=E0611,F0401
from openmdao.main.case import flatten_obj
def caseset_query_to_csv(data, filename='cases.csv', delimiter=',', quotechar='"'):
"""
Post-processing function that takes a case_data_set and outputs a csv
    file. It should be able to pass the tests of the current CSV case recorder
    (column ordering, meta column, etc.). Queries are assumed to be by case
    (not by variable).
Inputs:
data - results of fetch on Query object
"""
cds = data.cds
drivers = {}
for driver in cds.drivers:
drivers[driver['_id']] = driver['name']
# Determine inputs & outputs, map pseudos to expression names.
expressions = cds.simulation_info['expressions']
metadata = cds.simulation_info['variable_metadata']
inputs = []
outputs = []
pseudos = {}
for name in sorted(data[0].keys()):
# All inputs and outputs that change.
if name in metadata:
if metadata[name]['iotype'] == 'in':
inputs.append(name)
else:
outputs.append(name)
# Include objectives and constraints from all simulation levels.
elif '_pseudo_' in name and not name.endswith('.out0'):
for exp_name, exp_dict in expressions.items():
if exp_dict['pcomp_name'] == name:
pseudos[name] = '%s(%s)' % (exp_dict['data_type'], exp_name)
break
else:
raise RuntimeError('Cannot find %r in expressions' % name)
outputs.append(name)
# Allow private vars from components.
elif '.' in name:
outputs.append(name)
# Open CSV file
outfile = open(filename, 'wb')
csv_writer = csv.writer(outfile, delimiter=delimiter,
quotechar=quotechar,
quoting=csv.QUOTE_NONNUMERIC)
# No automatic data type conversion is performed unless the
# QUOTE_NONNUMERIC format option is specified (in which case unquoted
# fields are transformed into floats).
# Write the data
# data is a list of lists where the inner list is the values and metadata
# for a case
sorted_input_keys = []
sorted_input_values = []
sorted_output_keys = []
sorted_output_values = []
for i, row in enumerate( data ):
input_keys = []
input_values = []
for name in inputs:
obj = row[ row.name_map[ name ] ]
for key, value in flatten_obj(name, obj):
input_keys.append(key)
input_values.append(value)
output_keys = []
output_values = []
for name in outputs:
obj = row[ row.name_map[ name ] ]
if name in pseudos:
name = pseudos[name]
for key, value in flatten_obj(name, obj):
output_keys.append(key)
output_values.append(value)
# This should not be necessary, however python's csv writer
# is not writing boolean variables correctly as strings.
for index, item in enumerate(input_values):
if isinstance(item, bool):
input_values[index] = str(item)
for index, item in enumerate(output_values):
if isinstance(item, bool):
output_values[index] = str(item)
# Sort the columns alphabetically.
if len(input_keys) > 0:
sorted_input_keys, sorted_input_values = \
(list(item) for item in zip(*sorted(zip(input_keys,
input_values))))
if len(output_keys) > 0:
sorted_output_keys, sorted_output_values = \
(list(item) for item in zip(*sorted(zip(output_keys,
output_values))))
if outfile is None:
raise RuntimeError('Attempt to record on closed recorder')
if i == 0:
headers = ['timestamp', '/INPUTS']
headers.extend(sorted_input_keys)
headers.append('/OUTPUTS')
headers.extend(sorted_output_keys)
headers.extend(['/METADATA', 'uuid', 'parent_uuid', 'msg'])
csv_writer.writerow(headers)
header_size = len(headers)
timestamp = row[ row.name_map[ 'timestamp' ] ]
csv_data = [timestamp]
csv_data.append('')
csv_data.extend(sorted_input_values)
csv_data.append('')
csv_data.extend(sorted_output_values)
exc = row[ row.name_map[ 'error_message' ] ]
msg = '' if exc is None else str(exc)
case_uuid = row[ row.name_map[ '_id' ] ]
parent_uuid = row[ row.name_map[ '_parent_id' ] ]
csv_data.extend(['', case_uuid, parent_uuid, msg])
if header_size != len(csv_data):
raise RuntimeError("number of data points (%d) doesn't match header"
" size (%d) in CSV recorder"
% (len(csv_data), header_size))
csv_writer.writerow(csv_data)
outfile.close()
|
the-stack_0_15058 | from tkinter import Frame, Canvas, Tk, Text, LEFT, INSERT, END, messagebox, Button, X
from alarming_service import time_diff
def GUI_present():
root = Tk()
canvas = Canvas(root)
canvas.pack()
frame = Frame(canvas)
frame.pack()
top_text = Text(frame)
top_text.insert(
INSERT,
"Welcome to the Simple Alarming Service"
)
top_text.pack()
alarm_set = Button(frame, text="Set and Deploy Alarm", command=time_diff)
alarm_set.pack(fill=X)
root.mainloop()
if __name__ == "__main__":
GUI_present() |
the-stack_0_15059 | """
This is a file includes the main function for gym atari reinforcement learning.
"""
import os
import gym
import numpy as np
from argparse_decorate import init_parser, add_arg
from deep_q_learning import DQLAtari
from sac_discrete import SoftActorCriticsDiscrete
@init_parser()
@add_arg('--start_episode', type=int, default=0, help='A number for start episode index.')
@add_arg('--eval', type=bool, default=False, help='True means evaluate model only.')
@add_arg('--game_index', type=int, default=1, choices=[0, 1, 2],
help='Represent Breakout, MsPacman and Pong respectively.')
@add_arg('--env_name', type=str, default=None, help='The name of the gym atari environment.')
@add_arg('--memory_size', type=int, default=100000, help='The size of the memory space.')
@add_arg('--start_epsilon', type=float, default=1.0, help='The initial probability for random actions.')
@add_arg('--min_epsilon', type=float, default=0.05, help='The minimum probability for random actions.')
@add_arg('--reward_clip', type=bool, default=False, help='Clip reward in [-1, 1] range if True.')
@add_arg('--live_penalty', type=bool, default=True, help='Apply a penalty when the agent loses a life in the game.')
@add_arg('--agent', type=str, default='dsac', choices=['dql', 'dsac'],
help='Deep Q-learning and discrete soft Actor-Critics algorithms.')
def main(**kwargs):
"""
The main function for gym atari reinforcement learning.
"""
atari_game = ['BreakoutNoFrameskip-v4', 'MsPacmanNoFrameskip-v4', 'PongNoFrameskip-v4']
env_name = kwargs['env_name'] if kwargs['env_name'] is not None else atari_game[kwargs['game_index']]
dirs = './' + env_name
if not os.path.exists(dirs):
os.makedirs(dirs)
img_size = (4, 84, 84)
env = gym.make(env_name)
memory_par = (kwargs['memory_size'], img_size)
action_space = np.array([i for i in range(env.action_space.n)], dtype=np.uint8)
game = (env_name, env, kwargs['live_penalty'])
if kwargs['agent'] == 'dql':
agent = DQLAtari(memory_par=memory_par,
action_space=action_space,
game=game,
reward_clip=kwargs['reward_clip'],
epsilon=(kwargs['start_epsilon'], kwargs['min_epsilon']))
elif kwargs['agent'] == 'dsac':
agent = SoftActorCriticsDiscrete(memory_par=memory_par,
action_space=action_space,
game=game,
reward_clip=kwargs['reward_clip'])
else:
raise Exception('The agent does not exist.')
agent.simulate(net_path=dirs, start_episodes=kwargs['start_episode'], eval=kwargs['eval'], start_frames=0)
if __name__ == '__main__':
main()
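# Example invocation (hedged: the script name and flag values below are
# assumptions; flags themselves are defined by the decorators above):
#     python main.py --game_index 1 --agent dsac --memory_size 100000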
|
the-stack_0_15060 |
class GPX:
def __init__(self,filename):
self.fd = open(filename,"w")
self.fd.write('<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>\n')
self.fd.write('<gpx version="1.1"\n')
self.fd.write(' creator="Osmawalk - https://github.com/xtompok/osmawalk"\n')
self.fd.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n')
self.fd.write(' xmlns="http://www.topografix.com/GPX/1/1"\n')
self.fd.write(' xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd">\n')
self.wpts = []
def close(self):
for wpt in self.wpts:
self.fd.write(str(wpt))
self.wpts = []
self.fd.write('</gpx>\n');
self.fd.close()
def writeTrack(self,track):
self.startTrack()
for point in track:
self.writeTrkpt(point[0],point[1],point[2])
self.endTrack()
def addWpt(self,name,lat,lon,ele):
self.wpts.append(Wpt(name,lat,lon,ele))
def startTrack(self):
self.fd.write('<trk>\n<trkseg>\n')
def endTrack(self):
self.fd.write('</trkseg>\n</trk>\n')
for wpt in self.wpts:
self.fd.write(str(wpt))
self.wpts = []
def writeTrkpt(self,lat,lon,ele):
self.fd.write(' <trkpt lat="'+str(lat)+'" lon="'+str(lon)+'">\n')
self.fd.write(' <ele>'+str(ele)+'</ele>\n')
self.fd.write(' </trkpt>\n')
class Wpt:
def __init__(self,aname,alat,alon,anele):
self.name = aname
self.lat = alat
self.lon = alon
self.ele = anele
def __str__(self):
start='<wpt lat="'+str(self.lat)+'" lon="'+str(self.lon)+'">\n'
inner=' <name>'+str(self.name)+'</name>\n <ele>'+str(self.ele)+'</ele>\n'
end='</wpt>\n'
return start+inner+end
|
the-stack_0_15063 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Ensure that projects with multiple translation catalogs maintain translation
files correctly.
"""
# There was a bug that affected the `Translate()` when no targets were provided
# explicitelly via argument list. If, for exapmle, `pkg1/SConscript` and
# `pkg2/SConscript` scripts in some project `p1` had:
#
# Translate(LINGUAS_FILE = 1)
#
# then target languages defined in `pkg1/LINGUAS` would affect the list of
# target languages emitted by `pkg2/SConscript` and vice versa.
#
# The pull request #64 on bitbucket fixes this. Here is the test case to
# replicate the bug.
import TestSCons
from os import path
test = TestSCons.TestSCons()
if not test.where_is('xgettext'):
test.skip_test("could not find 'xgettext'; skipping test(s)\n")
if not test.where_is('msgmerge'):
test.skip_test("Could not find 'msgmerge'; skipping test(s)\n")
if not test.where_is('msginit'):
test.skip_test("could not find 'msginit'; skipping test(s)\n")
if not test.where_is('msgfmt'):
test.skip_test("could not find 'msgfmt'; skipping test(s)\n")
#############################################################################
# Test case 1
#############################################################################
test.subdir(['tc1'])
test.write( ['tc1', 'SConstruct'],
"""
env = Environment( tools = ["default", "gettext"] )
env.Replace(POAUTOINIT = 1)
env.Replace(LINGUAS_FILE = 1)
SConscript(["pkg1/SConscript", "pkg2/SConscript"], exports = ["env"])
""")
# package `pkg1`
test.subdir(['tc1', 'pkg1'])
test.write( ['tc1', 'pkg1', 'LINGUAS'],
"""
en
pl
""")
test.write( ['tc1', 'pkg1', 'SConscript'],
"""
Import("env")
env.Translate(source = ['a.cpp'])
""")
test.write(['tc1', 'pkg1', 'a.cpp'], """ gettext("Hello from pkg1/a.cpp") """ )
# package `pkg2`
test.subdir(['tc1', 'pkg2'])
test.write( ['tc1', 'pkg2', 'LINGUAS'],
"""
de
fr
""")
test.write( ['tc1', 'pkg2', 'SConscript'],
"""
Import("env")
env.Translate(source = ['b.cpp'])
""")
test.write(['tc1', 'pkg2', 'b.cpp'], """ gettext("Hello from pkg2/b.cpp") """ )
# NOTE: msgmerge(1) prints its messages to stderr, and we must ignore them,
# so stderr=None is crucial here. There is no point in matching stderr against
# specific values; the messages are internationalized :) ).
test.run(arguments = 'po-update', chdir = 'tc1', stderr = None)
test.must_exist( ['tc1', 'pkg1', 'en.po'] )
test.must_exist( ['tc1', 'pkg1', 'pl.po'] )
test.must_not_exist(['tc1', 'pkg1', 'de.po'] )
test.must_not_exist(['tc1', 'pkg1', 'fr.po'] )
test.must_exist( ['tc1', 'pkg2', 'de.po'] )
test.must_exist( ['tc1', 'pkg2', 'fr.po'] )
test.must_not_exist(['tc1', 'pkg2', 'en.po'] )
test.must_not_exist(['tc1', 'pkg2', 'pl.po'] )
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
the-stack_0_15065 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output |
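# Hedged usage sketch (not part of the original file): pushes a random
# MNIST-sized batch through the network; the 1x28x28 input shape is the
# standard MNIST format this architecture assumes (fc1 expects 9216 = 64*12*12).
if __name__ == "__main__":
    model = Net()
    model.eval()
    dummy = torch.randn(2, 1, 28, 28)  # (batch, channels, height, width)
    log_probs = model(dummy)           # log-softmax over 10 classes
    print(log_probs.shape)             # torch.Size([2, 10])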
the-stack_0_15066 | import os
import sys
import psutil
from monk.pytorch_prototype import prototype
from monk.compare_prototype import compare
from monk.pip_unit_tests.pytorch.common import print_start
from monk.pip_unit_tests.pytorch.common import print_status
import torch
import numpy as np
from monk.pytorch.losses.return_loss import load_loss
def test_block_inception_c(system_dict):
forward = True;
test = "test_block_inception_c";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
gtf = prototype(verbose=0);
gtf.Prototype("sample-project-1", "sample-experiment-1");
network = [];
network.append(gtf.inception_c_block(channels_7x7=3, pool_type="avg"));
network.append(gtf.inception_c_block(channels_7x7=3, pool_type="max"));
gtf.Compile_Network(network, data_shape=(1, 64, 64), use_gpu=False);
x = torch.randn(1, 1, 64, 64);
y = gtf.system_dict["local"]["model"](x);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
return system_dict
|
the-stack_0_15067 | import asyncio
import logging
import time
from typing import Iterable
import uvicorn
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from async_batch.batch_processor import BatchProcessor, TaskQueue
log = logging.getLogger(__file__)
LOGGING_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
"()": "uvicorn.logging.DefaultFormatter",
"fmt": "%(asctime)-15s %(levelprefix)s %(message)s",
"use_colors": None,
},
"access": {
"()": "uvicorn.logging.AccessFormatter",
"fmt": '%(asctime)-15s %(levelprefix)s %(client_addr)s - "%(request_line)s" %(status_code)s',
},
},
"handlers": {
"default": {
"formatter": "default",
"class": "logging.StreamHandler",
"stream": "ext://sys.stderr",
},
"access": {
"formatter": "access",
"class": "logging.StreamHandler",
"stream": "ext://sys.stdout",
},
},
"loggers": {
"": {"handlers": ["default"], "level": "DEBUG"},
"uvicorn.error": {"level": "DEBUG"},
"uvicorn.access": {"handlers": ["access"], "level": "DEBUG", "propagate": False},
},
}
class ExampleBatchProcessor(BatchProcessor):
def _process(self, batch_data: Iterable[int]) -> Iterable[int]:
return [x ** 2 for x in batch_data]
def __init__(self, batch_size: int):
self.batch_size = batch_size
def get_batch_size(self) -> int:
return self.batch_size
tq = TaskQueue(
batch_processor=ExampleBatchProcessor(2),
max_wait_time=3
)
app = FastAPI(
title="Async Batcher Example Project",
version="0.1",
description="Async Batch Project",
)
app.task_queue = tq
@app.on_event("startup")
async def start_task_queue():
log.info("Starting Task Queue")
app.task_queue.start()
@app.on_event("shutdown")
async def stop_task_queue():
log.info("Stopping Task Queue")
app.task_queue.stop()
@app.get("/")
async def read_root():
"""
Got to document
"""
return RedirectResponse("docs")
@app.post("/set/interval")
async def set_interval(interval: float):
"""
Got to document
"""
app.task_queue._interval = interval
return {"status": "success"}
@app.post("/set/batch_size")
async def set_batch_size(batch_size: int):
"""
    Set the batch size used by the batch processor.
"""
app.task_queue._batch_processor.batch_size = batch_size
return {"status": "success"}
@app.get("/test")
async def api_test(number: int):
log.info(f"Request come in with number={number}")
if not app.task_queue.is_alive():
if not app.task_queue.stop():
app.task_queue.start()
start_time = time.time()
data = await asyncio.wait_for(
app.task_queue.async_submit(number),
timeout=app.task_queue._interval + 1.0
)
spent = time.time() - start_time
return {
"status": "success",
"result": data,
"used_time": spent
}
if __name__ == '__main__':
uvicorn.run(app, log_config=LOGGING_CONFIG)
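# Example request once the server is running (hedged: host/port are the
# uvicorn defaults, not configured above): curl "http://127.0.0.1:8000/test?number=3"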
|
the-stack_0_15068 | """Registers functions to be called if an exception or signal occurs."""
import functools
import logging
import signal
import traceback
# pylint: disable=unused-import, no-name-in-module
from acme.magic_typing import Any, Callable, Dict, List, Union
# pylint: enable=unused-import, no-name-in-module
from certbot import errors
from certbot.compat import os
logger = logging.getLogger(__name__)
# _SIGNALS stores the signals that will be handled by the ErrorHandler. These
# signals were chosen as their default handler terminates the process and could
# potentially occur from inside Python. Signals such as SIGILL were not
# included as they could be a sign of something devious and we should terminate
# immediately.
if os.name != "nt":
_SIGNALS = [signal.SIGTERM]
for signal_code in [signal.SIGHUP, signal.SIGQUIT,
signal.SIGXCPU, signal.SIGXFSZ]:
# Adding only those signals that their default action is not Ignore.
# This is platform-dependent, so we check it dynamically.
if signal.getsignal(signal_code) != signal.SIG_IGN:
_SIGNALS.append(signal_code)
else:
# POSIX signals are not implemented natively in Windows, but emulated from the C runtime.
# As consumed by CPython, most handlers for these signals are useless, in particular
# SIGTERM: for instance, os.kill(pid, signal.SIGTERM) will call TerminateProcess, which stops
# the process immediately without calling the attached handler. Besides, the non-POSIX signals
# (CTRL_C_EVENT and CTRL_BREAK_EVENT) are implemented in a console context to handle the
# CTRL+C event for a process launched from the console. Only CTRL_C_EVENT behaves reliably
# in practice, and it maps to the SIGINT handler. However, in this case a
# KeyboardInterrupt is raised that will be handled by ErrorHandler through the context manager
# protocol. Finally, no signal on Windows is eligible to be handled using ErrorHandler.
#
# Refs: https://stackoverflow.com/a/35792192, https://maruel.ca/post/python_windows_signal,
# https://docs.python.org/2/library/os.html#os.kill,
# https://www.reddit.com/r/Python/comments/1dsblt/windows_command_line_automation_ctrlc_question
_SIGNALS = []
class ErrorHandler(object):
"""Context manager for running code that must be cleaned up on failure.
The context manager allows you to register functions that will be called
when an exception (excluding SystemExit) or signal is encountered.
Usage::
handler = ErrorHandler(cleanup1_func, *cleanup1_args, **cleanup1_kwargs)
handler.register(cleanup2_func, *cleanup2_args, **cleanup2_kwargs)
with handler:
do_something()
Or for one cleanup function::
with ErrorHandler(func, args, kwargs):
do_something()
If an exception is raised out of do_something, the cleanup functions will
be called in last in first out order. Then the exception is raised.
Similarly, if a signal is encountered, the cleanup functions are called
followed by the previously received signal handler.
Each registered cleanup function is called exactly once. If a registered
function raises an exception, it is logged and the next function is called.
Signals received while the registered functions are executing are
deferred until they finish.
"""
def __init__(self, func, *args, **kwargs):
self.call_on_regular_exit = False
self.body_executed = False
self.funcs = [] # type: List[Callable[[], Any]]
self.prev_handlers = {} # type: Dict[int, Union[int, None, Callable]]
self.received_signals = [] # type: List[int]
if func is not None:
self.register(func, *args, **kwargs)
def __enter__(self):
self.body_executed = False
self._set_signal_handlers()
def __exit__(self, exec_type, exec_value, trace):
self.body_executed = True
retval = False
# SystemExit is ignored to properly handle forks that don't exec
if exec_type is SystemExit:
return retval
elif exec_type is None:
if not self.call_on_regular_exit:
return retval
elif exec_type is errors.SignalExit:
logger.debug("Encountered signals: %s", self.received_signals)
retval = True
else:
logger.debug("Encountered exception:\n%s", "".join(
traceback.format_exception(exec_type, exec_value, trace)))
self._call_registered()
self._reset_signal_handlers()
self._call_signals()
return retval
def register(self, func, *args, **kwargs):
# type: (Callable, *Any, **Any) -> None
"""Sets func to be run with the given arguments during cleanup.
:param function func: function to be called in case of an error
"""
self.funcs.append(functools.partial(func, *args, **kwargs))
def _call_registered(self):
"""Calls all registered functions"""
logger.debug("Calling registered functions")
while self.funcs:
try:
self.funcs[-1]()
except Exception: # pylint: disable=broad-except
logger.error("Encountered exception during recovery: ", exc_info=True)
self.funcs.pop()
def _set_signal_handlers(self):
"""Sets signal handlers for signals in _SIGNALS."""
for signum in _SIGNALS:
prev_handler = signal.getsignal(signum)
# If prev_handler is None, the handler was set outside of Python
if prev_handler is not None:
self.prev_handlers[signum] = prev_handler
signal.signal(signum, self._signal_handler)
def _reset_signal_handlers(self):
"""Resets signal handlers for signals in _SIGNALS."""
for signum in self.prev_handlers:
signal.signal(signum, self.prev_handlers[signum])
self.prev_handlers.clear()
def _signal_handler(self, signum, unused_frame):
"""Replacement function for handling received signals.
Store the received signal. If we are executing the code block in
the body of the context manager, stop by raising signal exit.
:param int signum: number of current signal
"""
self.received_signals.append(signum)
if not self.body_executed:
raise errors.SignalExit
def _call_signals(self):
"""Finally call the deferred signals."""
for signum in self.received_signals:
logger.debug("Calling signal %s", signum)
os.kill(os.getpid(), signum)
class ExitHandler(ErrorHandler):
"""Context manager for running code that must be cleaned up.
Subclass of ErrorHandler, with the same usage and parameters.
In addition to cleaning up on all signals, also cleans up on
regular exit.
"""
def __init__(self, func, *args, **kwargs):
ErrorHandler.__init__(self, func, *args, **kwargs)
self.call_on_regular_exit = True
|
the-stack_0_15069 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 23 19:40:08 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import gc
import importlib.util
import inspect
import os
from pathlib import Path
import shlex
import subprocess
import sys
import sphinx_gallery
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curr_path = Path(__file__).expanduser().absolute().parent
if curr_path.name == "_staging":
# Can't use curr_path.parent, because sphinx_gallery requires a relative path.
tvm_path = Path(os.pardir, os.pardir)
else:
tvm_path = Path(os.pardir)
sys.path.insert(0, str(tvm_path.resolve() / "python"))
sys.path.insert(0, str(tvm_path.resolve() / "vta" / "python"))
sys.path.insert(0, str(tvm_path.resolve() / "docs"))
# -- General configuration ------------------------------------------------
locale_dirs = ["translates/locales/"]
gettext_compact = False
# General information about the project.
project = "tvm"
author = "Apache Software Foundation"
copyright = "2020 - 2021, %s" % author
github_doc_root = "https://github.com/apache/tvm/tree/main/docs/"
os.environ["TVM_BUILD_DOC"] = "1"
def git_describe_version(original_version):
"""Get git describe version."""
ver_py = tvm_path.joinpath("version.py")
libver = {"__file__": ver_py}
exec(compile(open(ver_py, "rb").read(), ver_py, "exec"), libver, libver)
_, gd_version = libver["git_describe_version"]()
if gd_version != original_version:
print("Use git describe based version %s" % gd_version)
return gd_version
# Version information.
import tvm
from tvm import topi
from tvm import te
from tvm import testing
version = git_describe_version(tvm.__version__)
release = version
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.mathjax",
"sphinx_gallery.gen_gallery",
"autodocsumm",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# generate autosummary even if no references
autosummary_generate = True
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "_staging"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme is set by the make target
html_theme = os.environ.get("TVM_THEME", "rtd")
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
# Only import the RTD theme and set it if we want to build the docs locally
if not on_rtd and html_theme == "rtd":
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_theme_options = {
"analytics_id": "UA-75982049-2",
"logo_only": True,
}
html_logo = "_static/img/tvm-logo-small.png"
html_favicon = "_static/img/tvm-logo-square.png"
# Output file base name for HTML help builder.
htmlhelp_basename = project + "doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "%s.tex" % project, project, author, "manual"),
]
intersphinx_mapping = {
"python": ("https://docs.python.org/{.major}".format(sys.version_info), None),
"numpy": ("https://numpy.org/doc/stable", None),
"scipy": ("https://docs.scipy.org/doc/scipy-1.8.0/html-scipyorg/", None),
"matplotlib": ("https://matplotlib.org/", None),
}
from sphinx_gallery.sorting import ExplicitOrder
examples_dirs = [
tvm_path.joinpath("gallery", "tutorial"),
tvm_path.joinpath("gallery", "how_to", "compile_models"),
tvm_path.joinpath("gallery", "how_to", "deploy_models"),
tvm_path.joinpath("gallery", "how_to", "work_with_relay"),
tvm_path.joinpath("gallery", "how_to", "work_with_schedules"),
tvm_path.joinpath("gallery", "how_to", "optimize_operators"),
tvm_path.joinpath("gallery", "how_to", "tune_with_autotvm"),
tvm_path.joinpath("gallery", "how_to", "tune_with_autoscheduler"),
tvm_path.joinpath("gallery", "how_to", "work_with_microtvm"),
tvm_path.joinpath("gallery", "how_to", "extend_tvm"),
tvm_path.joinpath("vta", "tutorials"),
]
gallery_dirs = [
"tutorial",
"how_to/compile_models",
"how_to/deploy_models",
"how_to/work_with_relay",
"how_to/work_with_schedules",
"how_to/optimize_operators",
"how_to/tune_with_autotvm",
"how_to/tune_with_autoscheduler",
"how_to/work_with_microtvm",
"how_to/extend_tvm",
"topic/vta/tutorials",
]
subsection_order = ExplicitOrder(
str(p)
for p in [
tvm_path / "vta" / "tutorials" / "frontend",
tvm_path / "vta" / "tutorials" / "optimize",
tvm_path / "vta" / "tutorials" / "autotvm",
]
)
# Explicitly define the order within a subsection.
# The listed files are sorted according to the list.
# The unlisted files are sorted by filenames.
# The unlisted files always appear after listed files.
within_subsection_order = {
"tutorial": [
"introduction.py",
"install.py",
"tvmc_command_line_driver.py",
"tvmc_python.py",
"autotvm_relay_x86.py",
"tensor_expr_get_started.py",
"autotvm_matmul_x86.py",
"auto_scheduler_matmul_x86.py",
"tensor_ir_blitz_course.py",
"topi.pi",
"cross_compilation_and_rpc.py",
"relay_quick_start.py",
],
"compile_models": [
"from_pytorch.py",
"from_tensorflow.py",
"from_mxnet.py",
"from_onnx.py",
"from_keras.py",
"from_tflite.py",
"from_coreml.py",
"from_darknet.py",
"from_caffe2.py",
"from_paddle.py",
],
"work_with_schedules": [
"schedule_primitives.py",
"reduction.py",
"intrin_math.py",
"scan.py",
"extern_op.py",
"tensorize.py",
"tuple_inputs.py",
"tedd.py",
],
"optimize_operators": [
"opt_gemm.py",
"opt_conv_cuda.py",
"opt_conv_tensorcore.py",
],
"tune_with_autotvm": [
"tune_conv2d_cuda.py",
"tune_relay_cuda.py",
"tune_relay_x86.py",
"tune_relay_arm.py",
"tune_relay_mobile_gpu.py",
],
"tune_with_autoscheduler": [
"tune_matmul_x86.py",
"tune_conv2d_layer_cuda.py",
"tune_network_x86.py",
"tune_network_cuda.py",
],
"extend_tvm": [
"low_level_custom_pass.py",
"use_pass_infra.py",
"use_pass_instrument.py",
"bring_your_own_datatypes.py",
],
"micro": [
"micro_autotune.py",
"micro_reference_vm.py",
"micro_tflite.py",
"micro_ethosu.py",
"micro_tvmc.py",
],
}
class WithinSubsectionOrder:
def __init__(self, src_dir):
self.src_dir = src_dir.split("/")[-1]
def __call__(self, filename):
# If the order is provided, use the provided order
if (
self.src_dir in within_subsection_order
and filename in within_subsection_order[self.src_dir]
):
index = within_subsection_order[self.src_dir].index(filename)
assert index < 1e10
return "\0%010d" % index
# Otherwise, sort by filename
return filename
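# For example (illustrative): with src_dir "tutorial", the listed file
# "install.py" gets the sort key "\0%010d" % 1, while an unlisted "zzz.py"
# keeps its own name as the key, so listed files always sort before unlisted ones.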
# When running the tutorials on GPUs we are dependent on the Python garbage collector
# collecting TVM packed function closures for any device memory to also be released. This
# is not a good setup for machines with lots of CPU ram but constrained GPU ram, so force
# a gc after each example.
def force_gc(gallery_conf, fname):
gc.collect()
sphinx_gallery_conf = {
"backreferences_dir": "gen_modules/backreferences",
"doc_module": ("tvm", "numpy"),
"reference_url": {
"tvm": None,
"matplotlib": "https://matplotlib.org/",
"numpy": "https://numpy.org/doc/stable",
},
"examples_dirs": examples_dirs,
"within_subsection_order": WithinSubsectionOrder,
"gallery_dirs": gallery_dirs,
"subsection_order": subsection_order,
"filename_pattern": os.environ.get("TVM_TUTORIAL_EXEC_PATTERN", ".py"),
"find_mayavi_figures": False,
"download_all_examples": False,
"min_reported_time": 60,
"expected_failing_examples": [],
"reset_modules": ("matplotlib", "seaborn", force_gc),
}
autodoc_default_options = {
"member-order": "bysource",
}
# Maps the original namespace to list of potential modules
# that we can import alias from.
tvm_alias_check_map = {
"tvm.te": ["tvm.tir"],
"tvm.tir": ["tvm.ir", "tvm.runtime"],
"tvm.relay": ["tvm.ir", "tvm.tir"],
}
## Setup header and other configs
import tlcpack_sphinx_addon
footer_copyright = "© 2020 Apache Software Foundation | All rights reserved"
footer_note = " ".join(
"""
Copyright © 2020 The Apache Software Foundation. Apache TVM, Apache, the Apache feather,
and the Apache TVM project logo are either trademarks or registered trademarks of
the Apache Software Foundation.""".split(
"\n"
)
).strip()
header_logo = "https://tvm.apache.org/assets/images/logo.svg"
header_logo_link = "https://tvm.apache.org/"
header_links = [
("Community", "https://tvm.apache.org/community"),
("Download", "https://tvm.apache.org/download"),
("VTA", "https://tvm.apache.org/vta"),
("Blog", "https://tvm.apache.org/blog"),
("Docs", "https://tvm.apache.org/docs"),
("Conference", "https://tvmconf.org"),
("Github", "https://github.com/apache/tvm/"),
]
header_dropdown = {
"name": "ASF",
"items": [
("Apache Homepage", "https://apache.org/"),
("License", "https://www.apache.org/licenses/"),
("Sponsorship", "https://www.apache.org/foundation/sponsorship.html"),
("Security", "https://www.apache.org/security/"),
("Thanks", "https://www.apache.org/foundation/thanks.html"),
("Events", "https://www.apache.org/events/current-event"),
],
}
html_context = {
"footer_copyright": footer_copyright,
"footer_note": footer_note,
"header_links": header_links,
"header_dropdown": header_dropdown,
"header_logo": header_logo,
"header_logo_link": header_logo_link,
}
# add additional overrides
templates_path += [tlcpack_sphinx_addon.get_templates_path()]
html_static_path += [tlcpack_sphinx_addon.get_static_path()]
def update_alias_docstring(name, obj, lines):
"""Update the docstring of alias functions.
This function checks if the obj is an alias of another documented object
in a different module.
If it is an alias, then it will append the alias information to the docstring.
Parameters
----------
name : str
The full name of the object in the doc.
obj : object
The original object.
lines : list
        The docstring lines; they need to be modified in place.
"""
arr = name.rsplit(".", 1)
if len(arr) != 2:
return
target_mod, target_name = arr
if target_mod not in tvm_alias_check_map:
return
if not hasattr(obj, "__module__"):
return
obj_mod = obj.__module__
for amod in tvm_alias_check_map[target_mod]:
if not obj_mod.startswith(amod):
continue
if hasattr(sys.modules[amod], target_name):
obj_type = ":py:func" if callable(obj) else ":py:class"
lines.append(".. rubric:: Alias of %s:`%s.%s`" % (obj_type, amod, target_name))
def process_docstring(app, what, name, obj, options, lines):
"""Sphinx callback to process docstring"""
if callable(obj) or inspect.isclass(obj):
update_alias_docstring(name, obj, lines)
from legacy_redirect import build_legacy_redirect
def setup(app):
app.connect("autodoc-process-docstring", process_docstring)
app.connect("build-finished", build_legacy_redirect(tvm_path))
|
the-stack_0_15070 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenAuthAppCancelModel(object):
def __init__(self):
self._auth_app_id = None
self._auth_scene = None
self._operator_user_id = None
@property
def auth_app_id(self):
return self._auth_app_id
@auth_app_id.setter
def auth_app_id(self, value):
self._auth_app_id = value
@property
def auth_scene(self):
return self._auth_scene
@auth_scene.setter
def auth_scene(self, value):
self._auth_scene = value
@property
def operator_user_id(self):
return self._operator_user_id
@operator_user_id.setter
def operator_user_id(self, value):
self._operator_user_id = value
def to_alipay_dict(self):
params = dict()
if self.auth_app_id:
if hasattr(self.auth_app_id, 'to_alipay_dict'):
params['auth_app_id'] = self.auth_app_id.to_alipay_dict()
else:
params['auth_app_id'] = self.auth_app_id
if self.auth_scene:
if hasattr(self.auth_scene, 'to_alipay_dict'):
params['auth_scene'] = self.auth_scene.to_alipay_dict()
else:
params['auth_scene'] = self.auth_scene
if self.operator_user_id:
if hasattr(self.operator_user_id, 'to_alipay_dict'):
params['operator_user_id'] = self.operator_user_id.to_alipay_dict()
else:
params['operator_user_id'] = self.operator_user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenAuthAppCancelModel()
if 'auth_app_id' in d:
o.auth_app_id = d['auth_app_id']
if 'auth_scene' in d:
o.auth_scene = d['auth_scene']
if 'operator_user_id' in d:
o.operator_user_id = d['operator_user_id']
return o
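# A minimal round-trip sketch (the field values below are made-up placeholders,
# not values required by the Alipay API):
if __name__ == '__main__':
    _demo = AlipayOpenAuthAppCancelModel()
    _demo.auth_app_id = '2021000000000000'
    _demo.auth_scene = 'SCENE'
    _demo.operator_user_id = '2088000000000000'
    # to_alipay_dict()/from_alipay_dict() should round-trip all three fields
    print(AlipayOpenAuthAppCancelModel.from_alipay_dict(_demo.to_alipay_dict()).auth_app_id)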
|
the-stack_0_15071 | # -*- coding: utf-8 -*-
import click
import json
from ..utils.spinner import (
init_spinner,
start_spinner,
stop_spinner,
)
from ..utils.print import (
tbprint,
eprint,
oprint,
opprint,
)
@click.group()
@click.pass_obj
@click.pass_context
def command_runner(ctx, obj):
"""DNA Center Command Runner API (version: 1.3.3).
Wraps the DNA Center Command Runner API and exposes the API as native Python commands.
"""
ctx.obj = obj.command_runner
@command_runner.command()
@click.option('--headers', type=str, help='''Dictionary of HTTP Headers to send with the Request.''',
default=None,
show_default=True)
@click.option('-pp', '--pretty_print', type=int, help='''Pretty print indent''',
default=None,
show_default=True)
@click.option('--beep', is_flag=True, help='''Spinner beep (on)''')
@click.pass_obj
def get_all_keywords_of_clis_accepted(obj, pretty_print, beep,
headers):
"""Get valid keywords.
"""
spinner = init_spinner(beep=beep)
start_spinner(spinner)
try:
if headers is not None:
headers = json.loads(headers)
result = obj.get_all_keywords_of_clis_accepted(
headers=headers)
stop_spinner(spinner)
opprint(result, indent=pretty_print)
except Exception as e:
stop_spinner(spinner)
tbprint()
eprint('Error:', e)
click.Context.exit(-1)
@command_runner.command()
@click.option('--commands', type=str, multiple=True,
help='''CommandRunnerDTO's commands (list of strings).''',
default=None,
show_default=True)
@click.option('--description', type=str,
help='''CommandRunnerDTO's description.''',
default=None,
show_default=True)
@click.option('--deviceuuids', type=str, multiple=True,
help='''CommandRunnerDTO's deviceUuids (list of strings).''',
default=None,
show_default=True)
@click.option('--name', type=str,
help='''CommandRunnerDTO's name.''',
default=None,
show_default=True)
@click.option('--timeout', type=int,
help='''CommandRunnerDTO's timeout.''',
default=None,
show_default=True)
@click.option('--headers', type=str, help='''Dictionary of HTTP Headers to send with the Request.''',
default=None,
show_default=True)
@click.option('--payload', type=str, help='''A JSON serializable Python object to send in the body of the Request.''',
default=None,
show_default=True)
@click.option('--active_validation', type=bool, help='''Enable/Disable payload validation.''',
default=True,
show_default=True)
@click.option('-pp', '--pretty_print', type=int, help='''Pretty print indent''',
default=None,
show_default=True)
@click.option('--beep', is_flag=True, help='''Spinner beep (on)''')
@click.pass_obj
def run_read_only_commands_on_devices(obj, pretty_print, beep,
commands,
description,
deviceuuids,
name,
timeout,
headers,
payload,
active_validation):
"""Submit request for read-only CLIs.
"""
spinner = init_spinner(beep=beep)
start_spinner(spinner)
try:
if headers is not None:
headers = json.loads(headers)
if payload is not None:
payload = json.loads(payload)
commands = list(commands)
commands = commands if len(commands) > 0 else None
deviceuuids = list(deviceuuids)
deviceuuids = deviceuuids if len(deviceuuids) > 0 else None
result = obj.run_read_only_commands_on_devices(
commands=commands,
description=description,
deviceUuids=deviceuuids,
name=name,
timeout=timeout,
headers=headers,
payload=payload,
active_validation=active_validation)
stop_spinner(spinner)
opprint(result, indent=pretty_print)
except Exception as e:
stop_spinner(spinner)
tbprint()
eprint('Error:', e)
click.Context.exit(-1)
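# Illustrative shell usage (the root executable name and the exact command
# spelling are assumptions about how the parent package registers this group):
#
#   dnacentercli command-runner get-all-keywords-of-clis-accepted
#   dnacentercli command-runner run-read-only-commands-on-devices \
#       --commands "show version" --deviceuuids "<device-uuid>" --timeout 60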
|
the-stack_0_15074 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from datetime import datetime, timedelta
from django.utils.translation import ugettext as _
from notebook.connectors.dataeng import DataEng, DATE_FORMAT
from jobbrowser.apis.base_api import Api
LOG = logging.getLogger(__name__)
class DataEngClusterApi(Api):
def apps(self, filters):
api = DataEng(self.user)
jobs = api.list_clusters()
return {
'apps': [{
'id': app['crn'],
'name': '%(clusterName)s' % app,
'status': app['status'],
'apiStatus': self._api_status(app['status']),
'type': '%(serviceType)s %(workersGroupSize)s %(instanceType)s %(cdhVersion)s' % app,
'user': app['clusterName'].split('-', 1)[0],
'progress': 100,
'queue': 'group',
'duration': 1,
'submitted': app['creationDate']
} for app in jobs['clusters']],
            'total': len(jobs['clusters'])
}
def app(self, appid):
return {}
def action(self, appid, action):
return {}
def logs(self, appid, app_type, log_name=None, is_embeddable=False):
return {'logs': ''}
def profile(self, appid, app_type, app_property):
return {}
def _api_status(self, status):
if status in ['CREATING', 'CREATED', 'TERMINATING']:
return 'RUNNING'
elif status in ['ARCHIVING', 'COMPLETED']:
return 'SUCCEEDED'
else:
return 'FAILED' # KILLED and FAILED
class DataEngJobApi(Api):
def apps(self, filters):
kwargs = {}
if 'time' in filters:
if filters['time']['time_unit'] == 'minutes':
delta = timedelta(minutes=int(filters['time']['time_value']))
elif filters['time']['time_unit'] == 'hours':
delta = timedelta(hours=int(filters['time']['time_value']))
else:
delta = timedelta(days=int(filters['time']['time_value']))
kwargs['creation_date_after'] = (datetime.today() - delta).strftime(DATE_FORMAT)
# Todo: filter on 'cluster_crn'
api = DataEng(self.user)
jobs = api.list_jobs(**kwargs)
return {
'apps': [{
'id': app['jobId'],
'name': app['creationDate'],
'status': app['status'],
'apiStatus': self._api_status(app['status']),
'type': app['jobType'],
'user': '',
'progress': 100,
'duration': 10 * 3600,
'submitted': app['creationDate']
} for app in jobs['jobs']],
'total': len(jobs)
}
def app(self, appid):
handle = DataEng(self.user).describe_job(job_id=appid)
job = handle['job']
common = {
'id': job['jobId'],
'name': job['jobId'],
'status': job['status'],
'apiStatus': self._api_status(job['status']),
'progress': 50,
'duration': 10 * 3600,
'submitted': job['creationDate'],
'type': 'dataeng-job-%s' % job['jobType'],
}
common['properties'] = {
'properties': job
}
return common
def action(self, appid, action):
return {}
def logs(self, appid, app_type, log_name=None, is_embeddable=False):
return {'logs': ''}
def profile(self, appid, app_type, app_property):
return {}
def _api_status(self, status):
if status in ['CREATING', 'CREATED', 'TERMINATING']:
return 'RUNNING'
elif status in ['COMPLETED']:
return 'SUCCEEDED'
else:
return 'FAILED' # INTERRUPTED , KILLED, TERMINATED and FAILED
|
the-stack_0_15076 | # Import the modules
import cv2
import numpy as np
import os
import imutils
from imutils.video import WebcamVideoStream
DEBUG = True
# Custom functions
# define rotate function
def rotate(image, angle, center=None, scale=1.0):
# get image size
(h, w) = image.shape[:2]
    # If no center is given, use the image center as the rotation center
if center is None:
center = (w / 2, h / 2)
# Do rotation
M = cv2.getRotationMatrix2D(center, angle, scale)
rotated = cv2.warpAffine(image, M, (w, h))
# return rotated image
return rotated
# Load the model
os.chdir(os.path.dirname(__file__))
net = cv2.dnn.readNet('../model/inference_graph.xml', '../model/inference_graph.bin')
# Specify target device
net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
# Read the Camera
vs = WebcamVideoStream(src=0).start()
while True:
# grab the frame from the threaded video stream and resize it
# to have a maximum width of 400 pixels
frame = vs.read()
frame = imutils.resize(frame, width=600)
frame = rotate(frame, 180)
# Convert to grayscale and apply Gaussian filtering
im_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
im_gray = cv2.GaussianBlur(im_gray, (5, 5), 0)
    # Threshold the image (THRESH_BINARY_INV inverts black and white)
ret, im_th = cv2.threshold(im_gray, 127, 255, cv2.THRESH_BINARY_INV)
if DEBUG: im_th_display = im_th.copy()
# Find contours in the image
ctrs, hier = cv2.findContours(im_th.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Get the bounding rectangle of each contour
rects = [cv2.boundingRect(ctr) for ctr in ctrs]
# For each rectangular region, use mnist cnn model to predict.
for rect in rects:
# detect empty rect
# (x, y, w, h) => ex. (0, 0, 600, 450)
        # Ignore rects that are too small, too big, or have a bad width:height ratio
if(rect[2]*rect[3] < 60 or rect[2]*rect[3] > 20000 or rect[2]>rect[3]*10):
if DEBUG: print('info:{}, IGNORE'.format(rect))
break
else:
if DEBUG: print('info:{}, DISPLAY'.format(rect))
else: pass
cv2.rectangle(frame, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0), 3)
# Make the rectangular region around the digit
leng = int(rect[3] * 1.6)
pt1 = int(rect[1] + rect[3] // 2 - leng // 2)
pt2 = int(rect[0] + rect[2] // 2 - leng // 2)
roi = im_th[pt1:pt1+leng, pt2:pt2+leng]
# Draw the rectangles
if DEBUG: cv2.rectangle(im_th_display, (pt2, pt1), (pt2+leng, pt1+leng), (255, 255, 255), 3)
# Prevent error: (-215 Assertion failed) !ssize.empty() in function 'resize'
if(roi.size == 0): break
# Resize the image
roi = cv2.resize(roi, (28, 28), interpolation=cv2.INTER_AREA)
        # Dilate to thicken the digit strokes
roi = cv2.dilate(roi, (3, 3))
# Inference
blob = cv2.dnn.blobFromImage(roi, size=(28, 28), ddepth=cv2.CV_32F)
net.setInput(blob)
out = net.forward()
if out[0][int(np.argmax(out[0]))] > 0.5:
            # cv2.putText(image, text, origin, fontFace, fontScale, color, thickness, lineType)
#if DEBUG: cv2.putText(im_th_display, str(np.argmax(out[0])), (rect[0], rect[1]), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 255), 3)
cv2.putText(frame, str(np.argmax(out[0])), (rect[0], rect[1]), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 255), 3)
if DEBUG: cv2.imshow("Debug", im_th_display)
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
|
the-stack_0_15078 | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, LSTM, Dropout
import tensorflow.keras as keras
from zoo.automl.model.abstract import BaseModel
from zoo.automl.common.util import *
from zoo.automl.common.metrics import Evaluator
class VanillaLSTM(BaseModel):
def __init__(self, check_optional_config=False, future_seq_len=1):
"""
Constructor of Vanilla LSTM model
"""
self.model = None
self.check_optional_config = check_optional_config
self.future_seq_len = future_seq_len
self.feature_num = None
self.metric = None
self.batch_size = None
self.loss = None
def _get_dropout(self, input_tensor, p=0.5, mc=False):
if mc:
return Dropout(p)(input_tensor, training=True)
else:
return Dropout(p)(input_tensor)
def _build(self, mc=False, **config):
"""
build vanilla LSTM model
:param config: model hyper parameters
:return: self
"""
super()._check_config(**config)
self.metric = config.get('metric', 'mean_squared_error')
self.batch_size = config.get('batch_size', 1024)
self.feature_num = config["feature_num"]
self.loss = config.get("loss", "mse")
inp = Input(shape=(None, self.feature_num))
lstm_1 = LSTM(units=config.get('lstm_1_units', 20),
return_sequences=True)(inp)
dropout_1 = self._get_dropout(lstm_1,
p=config.get('dropout_1', 0.2),
mc=mc)
lstm_2 = LSTM(units=config.get('lstm_2_units', 10),
return_sequences=False)(dropout_1)
dropout_2 = self._get_dropout(lstm_2,
p=config.get('dropout_2', 0.2),
mc=mc)
out = Dense(self.future_seq_len)(dropout_2)
self.model = Model(inputs=inp, outputs=out)
self.model.compile(loss=self.loss,
metrics=[self.metric],
optimizer=keras.optimizers.RMSprop(lr=config.get('lr', 0.001)))
return self.model
def fit_eval(self, x, y, validation_data=None, mc=False, verbose=0, **config):
"""
fit for one iteration
:param x: 3-d array in format (no. of samples, past sequence length, 2+feature length),
in the last dimension, the 1st col is the time index (data type needs to be numpy datetime
type, e.g. "datetime64"),
the 2nd col is the target value (data type should be numeric)
:param y: 2-d numpy array in format (no. of samples, future sequence length)
if future sequence length > 1, or 1-d numpy array in format (no. of samples, )
if future sequence length = 1
:param validation_data: tuple in format (x_test,y_test), data used for validation.
If this is specified, validation result will be the optimization target for automl.
Otherwise, train metric will be the optimization target.
:param config: optimization hyper parameters
:return: the resulting metric
"""
config.update({"feature_num": x.shape[2]})
# if model is not initialized, __build the model
if self.model is None:
self._build(mc=mc, **config)
hist = self.model.fit(x, y,
validation_data=validation_data,
batch_size=self.batch_size,
epochs=config.get("epochs", 10),
verbose=verbose
)
# print(hist.history)
if validation_data is None:
# get train metrics
# results = self.model.evaluate(x, y)
result = hist.history.get(self.metric)[0]
else:
result = hist.history.get('val_' + str(self.metric))[0]
return result
def evaluate(self, x, y, metric=['mse']):
"""
Evaluate on x, y
:param x: input
:param y: target
:param metric: a list of metrics in string format
:return: a list of metric evaluation results
"""
y_pred = self.predict(x)
return [Evaluator.evaluate(m, y, y_pred) for m in metric]
def predict(self, x, mc=False):
"""
Prediction on x.
:param x: input
:return: predicted y
"""
return self.model.predict(x)
def predict_with_uncertainty(self, x, n_iter=100):
result = np.zeros((n_iter,) + (x.shape[0], self.future_seq_len))
for i in range(n_iter):
result[i, :, :] = self.predict(x)
prediction = result.mean(axis=0)
uncertainty = result.std(axis=0)
return prediction, uncertainty
def save(self, model_path, config_path):
"""
save model to file.
:param model_path: the model file.
:param config_path: the config file
:return:
"""
self.model.save(model_path)
# os.rename("vanilla_lstm_tmp.h5", model_path)
config_to_save = {
# "future_seq_len": self.future_seq_len,
"metric": self.metric,
"batch_size": self.batch_size
}
save_config(config_path, config_to_save)
def restore(self, model_path, **config):
"""
restore model from file
:param model_path: the model file
:param config: the trial config
:return: the restored model
"""
# self.model = None
# self._build(**config)
self.model = keras.models.load_model(model_path)
# self.model.load_weights(file_path)
# self.future_seq_len = config["future_seq_len"]
# for continuous training
self.metric = config["metric"]
self.batch_size = config["batch_size"]
def _get_required_parameters(self):
return {
"feature_num"
}
def _get_optional_parameters(self):
return {
'lstm_1_units',
'dropout_1',
'lstm_2_units',
'dropout_2',
'metric',
'lr',
'epochs',
'batch_size',
'loss'
}
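# A minimal usage sketch (illustrative only; the array shapes follow the
# fit_eval docstring above, and the config values are assumptions):
#
#   import numpy as np
#   model = VanillaLSTM(future_seq_len=1)
#   x = np.random.rand(100, 20, 4)   # (samples, past seq len, feature num)
#   y = np.random.rand(100, 1)       # (samples, future seq len)
#   val_metric = model.fit_eval(x, y, validation_data=(x, y), epochs=1)
#   predictions = model.predict(x)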
|
the-stack_0_15079 | """Support to interface with the Plex API."""
from functools import wraps
import json
import logging
import plexapi.exceptions
import requests.exceptions
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_BROWSE_MEDIA,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import STATE_IDLE, STATE_PAUSED, STATE_PLAYING
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.helpers.network import is_internal_request
from .const import (
COMMON_PLAYERS,
CONF_SERVER_IDENTIFIER,
DISPATCHERS,
DOMAIN as PLEX_DOMAIN,
NAME_FORMAT,
PLEX_NEW_MP_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SESSION_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL,
PLEX_UPDATE_SENSOR_SIGNAL,
SERVERS,
)
from .media_browser import browse_media
_LOGGER = logging.getLogger(__name__)
def needs_session(func):
"""Ensure session is available for certain attributes."""
@wraps(func)
def get_session_attribute(self, *args):
if self.session is None:
return None
return func(self, *args)
return get_session_attribute
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Plex media_player from a config entry."""
server_id = config_entry.data[CONF_SERVER_IDENTIFIER]
registry = await async_get_registry(hass)
@callback
def async_new_media_players(new_entities):
_async_add_entities(hass, registry, async_add_entities, server_id, new_entities)
unsub = async_dispatcher_connect(
hass, PLEX_NEW_MP_SIGNAL.format(server_id), async_new_media_players
)
hass.data[PLEX_DOMAIN][DISPATCHERS][server_id].append(unsub)
_LOGGER.debug("New entity listener created")
@callback
def _async_add_entities(hass, registry, async_add_entities, server_id, new_entities):
"""Set up Plex media_player entities."""
_LOGGER.debug("New entities: %s", new_entities)
entities = []
plexserver = hass.data[PLEX_DOMAIN][SERVERS][server_id]
for entity_params in new_entities:
plex_mp = PlexMediaPlayer(plexserver, **entity_params)
entities.append(plex_mp)
# Migration to per-server unique_ids
old_entity_id = registry.async_get_entity_id(
MP_DOMAIN, PLEX_DOMAIN, plex_mp.machine_identifier
)
if old_entity_id is not None:
new_unique_id = f"{server_id}:{plex_mp.machine_identifier}"
_LOGGER.debug(
"Migrating unique_id from [%s] to [%s]",
plex_mp.machine_identifier,
new_unique_id,
)
registry.async_update_entity(old_entity_id, new_unique_id=new_unique_id)
async_add_entities(entities, True)
class PlexMediaPlayer(MediaPlayerEntity):
"""Representation of a Plex device."""
def __init__(self, plex_server, device, player_source, session=None):
"""Initialize the Plex device."""
self.plex_server = plex_server
self.device = device
self.player_source = player_source
self.device_make = None
self.device_platform = None
self.device_product = None
self.device_title = None
self.device_version = None
self.machine_identifier = device.machineIdentifier
self.session_device = None
self._available = False
self._device_protocol_capabilities = None
self._name = None
self._previous_volume_level = 1 # Used in fake muting
self._state = STATE_IDLE
self._volume_level = 1 # since we can't retrieve remotely
self._volume_muted = False # since we can't retrieve remotely
# Initializes other attributes
self.session = session
async def async_added_to_hass(self):
"""Run when about to be added to hass."""
_LOGGER.debug("Added %s [%s]", self.entity_id, self.unique_id)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL.format(self.unique_id),
self.async_refresh_media_player,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
PLEX_UPDATE_MEDIA_PLAYER_SESSION_SIGNAL.format(self.unique_id),
self.async_update_from_websocket,
)
)
@callback
def async_refresh_media_player(self, device, session, source):
"""Set instance objects and trigger an entity state update."""
_LOGGER.debug("Refreshing %s [%s / %s]", self.entity_id, device, session)
self.device = device
self.session = session
if source:
self.player_source = source
self.async_schedule_update_ha_state(True)
async_dispatcher_send(
self.hass,
PLEX_UPDATE_SENSOR_SIGNAL.format(self.plex_server.machine_identifier),
)
@callback
def async_update_from_websocket(self, state):
"""Update the entity based on new websocket data."""
self.update_state(state)
self.async_write_ha_state()
async_dispatcher_send(
self.hass,
PLEX_UPDATE_SENSOR_SIGNAL.format(self.plex_server.machine_identifier),
)
def update(self):
"""Refresh key device data."""
if not self.session:
self.force_idle()
if not self.device:
self._available = False
return
self._available = True
try:
device_url = self.device.url("/")
except plexapi.exceptions.BadRequest:
device_url = "127.0.0.1"
if "127.0.0.1" in device_url:
self.device.proxyThroughServer()
self._device_protocol_capabilities = self.device.protocolCapabilities
for device in filter(None, [self.device, self.session_device]):
self.device_make = self.device_make or device.device
self.device_platform = self.device_platform or device.platform
self.device_product = self.device_product or device.product
self.device_title = self.device_title or device.title
self.device_version = self.device_version or device.version
name_parts = [self.device_product, self.device_title or self.device_platform]
if (self.device_product in COMMON_PLAYERS) and self.device_make:
# Add more context in name for likely duplicates
name_parts.append(self.device_make)
if self.username and self.username != self.plex_server.owner:
# Prepend username for shared/managed clients
name_parts.insert(0, self.username)
self._name = NAME_FORMAT.format(" - ".join(name_parts))
def force_idle(self):
"""Force client to idle."""
self._state = STATE_IDLE
if self.player_source == "session":
self.device = None
self.session_device = None
self._available = False
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return False
@property
def unique_id(self):
"""Return the id of this plex client."""
return f"{self.plex_server.machine_identifier}:{self.machine_identifier}"
@property
def session(self):
"""Return the active session for this player."""
return self._session
@session.setter
def session(self, session):
self._session = session
if session:
self.session_device = self.session.player
self.update_state(self.session.state)
else:
self._state = STATE_IDLE
@property
def available(self):
"""Return the availability of the client."""
return self._available
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
@needs_session
def username(self):
"""Return the username of the client owner."""
return self.session.username
@property
def state(self):
"""Return the state of the device."""
return self._state
def update_state(self, state):
"""Set the state of the device, handle session termination."""
if state == "playing":
self._state = STATE_PLAYING
elif state == "paused":
self._state = STATE_PAUSED
elif state == "stopped":
self.session = None
self.force_idle()
else:
self._state = STATE_IDLE
@property
def _is_player_active(self):
"""Report if the client is playing media."""
return self.state in [STATE_PLAYING, STATE_PAUSED]
@property
def _active_media_plexapi_type(self):
"""Get the active media type required by PlexAPI commands."""
if self.media_content_type is MEDIA_TYPE_MUSIC:
return "music"
return "video"
@property
@needs_session
def session_key(self):
"""Return current session key."""
return self.session.sessionKey
@property
@needs_session
def media_library_title(self):
"""Return the library name of playing media."""
return self.session.media_library_title
@property
@needs_session
def media_content_id(self):
"""Return the content ID of current playing media."""
return self.session.media_content_id
@property
@needs_session
def media_content_type(self):
"""Return the content type of current playing media."""
return self.session.media_content_type
@property
@needs_session
def media_content_rating(self):
"""Return the content rating of current playing media."""
return self.session.media_content_rating
@property
@needs_session
def media_artist(self):
"""Return the artist of current playing media, music track only."""
return self.session.media_artist
@property
@needs_session
def media_album_name(self):
"""Return the album name of current playing media, music track only."""
return self.session.media_album_name
@property
@needs_session
def media_album_artist(self):
"""Return the album artist of current playing media, music only."""
return self.session.media_album_artist
@property
@needs_session
def media_track(self):
"""Return the track number of current playing media, music only."""
return self.session.media_track
@property
@needs_session
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return self.session.media_duration
@property
@needs_session
def media_position(self):
"""Return the duration of current playing media in seconds."""
return self.session.media_position
@property
@needs_session
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
return self.session.media_position_updated_at
@property
@needs_session
def media_image_url(self):
"""Return the image URL of current playing media."""
return self.session.media_image_url
@property
@needs_session
def media_summary(self):
"""Return the summary of current playing media."""
return self.session.media_summary
@property
@needs_session
def media_title(self):
"""Return the title of current playing media."""
return self.session.media_title
@property
@needs_session
def media_season(self):
"""Return the season of current playing media (TV Show only)."""
return self.session.media_season
@property
@needs_session
def media_series_title(self):
"""Return the title of the series of current playing media."""
return self.session.media_series_title
@property
@needs_session
def media_episode(self):
"""Return the episode of current playing media (TV Show only)."""
return self.session.media_episode
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self.device and "playback" in self._device_protocol_capabilities:
return (
SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_STOP
| SUPPORT_SEEK
| SUPPORT_VOLUME_SET
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
| SUPPORT_VOLUME_MUTE
| SUPPORT_BROWSE_MEDIA
)
return SUPPORT_BROWSE_MEDIA | SUPPORT_PLAY_MEDIA
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.setVolume(int(volume * 100), self._active_media_plexapi_type)
self._volume_level = volume # store since we can't retrieve
@property
def volume_level(self):
"""Return the volume level of the client (0..1)."""
if (
self._is_player_active
and self.device
and "playback" in self._device_protocol_capabilities
):
return self._volume_level
return None
@property
def is_volume_muted(self):
"""Return boolean if volume is currently muted."""
if self._is_player_active and self.device:
return self._volume_muted
return None
def mute_volume(self, mute):
"""Mute the volume.
Since we can't actually mute, we'll:
- On mute, store volume and set volume to 0
- On unmute, set volume to previously stored volume
"""
if not (self.device and "playback" in self._device_protocol_capabilities):
return
self._volume_muted = mute
if mute:
self._previous_volume_level = self._volume_level
self.set_volume_level(0)
else:
self.set_volume_level(self._previous_volume_level)
def media_play(self):
"""Send play command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.play(self._active_media_plexapi_type)
def media_pause(self):
"""Send pause command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.pause(self._active_media_plexapi_type)
def media_stop(self):
"""Send stop command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.stop(self._active_media_plexapi_type)
def media_seek(self, position):
"""Send the seek command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.seekTo(position * 1000, self._active_media_plexapi_type)
def media_next_track(self):
"""Send next track command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.skipNext(self._active_media_plexapi_type)
def media_previous_track(self):
"""Send previous track command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.skipPrevious(self._active_media_plexapi_type)
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
if not (self.device and "playback" in self._device_protocol_capabilities):
_LOGGER.debug(
"Client is not currently accepting playback controls: %s", self.name
)
return
src = json.loads(media_id)
if isinstance(src, int):
src = {"plex_key": src}
playqueue_id = src.pop("playqueue_id", None)
if playqueue_id:
try:
playqueue = self.plex_server.get_playqueue(playqueue_id)
except plexapi.exceptions.NotFound as err:
raise HomeAssistantError(
f"PlayQueue '{playqueue_id}' could not be found"
) from err
else:
shuffle = src.pop("shuffle", 0)
media = self.plex_server.lookup_media(media_type, **src)
if media is None:
_LOGGER.error("Media could not be found: %s", media_id)
return
_LOGGER.debug("Attempting to play %s on %s", media, self.name)
playqueue = self.plex_server.create_playqueue(media, shuffle=shuffle)
try:
self.device.playMedia(playqueue)
except requests.exceptions.ConnectTimeout:
_LOGGER.error("Timed out playing on %s", self.name)
@property
def device_state_attributes(self):
"""Return the scene state attributes."""
attributes = {}
for attr in [
"media_content_rating",
"media_library_title",
"player_source",
"media_summary",
"username",
]:
value = getattr(self, attr, None)
if value:
attributes[attr] = value
return attributes
@property
def device_info(self):
"""Return a device description for device registry."""
if self.machine_identifier is None:
return None
return {
"identifiers": {(PLEX_DOMAIN, self.machine_identifier)},
"manufacturer": self.device_platform or "Plex",
"model": self.device_product or self.device_make,
"name": self.name,
"sw_version": self.device_version,
"via_device": (PLEX_DOMAIN, self.plex_server.machine_identifier),
}
async def async_browse_media(self, media_content_type=None, media_content_id=None):
"""Implement the websocket media browsing helper."""
is_internal = is_internal_request(self.hass)
return await self.hass.async_add_executor_job(
browse_media,
self,
is_internal,
media_content_type,
media_content_id,
)
async def async_get_browse_image(
self, media_content_type, media_content_id, media_image_id=None
):
"""Get media image from Plex server."""
image_url = self.plex_server.thumbnail_cache.get(media_content_id)
if image_url:
result = await self._async_fetch_image(image_url)
return result
return (None, None)
|
the-stack_0_15080 | from django import template
register = template.Library()
@register.filter_function
def attr(obj, arg1):
"""
Use in templates:
{% load field_attrs %}
then, in a form field:
{{ form.phone|attr:"style=width:143px;background-color:yellow"|attr:"size=30" }}
"""
att, value = arg1.split("=")
obj.field.widget.attrs[att] = value
return obj
|
the-stack_0_15085 | import os
from celery.schedules import crontab
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
# Set in local_settings.py
SECRET_KEY = 'SECRET_SECRET_SECRET'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv('DEBUG_STATUS', True)
ALLOWED_HOSTS = ['*']
# Application definition
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login/'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'simple_pagination',
'compressor',
'common',
'accounts',
'cases',
'contacts',
'emails',
'leads',
'opportunity',
'planner',
'sorl.thumbnail',
'phonenumber_field',
'storages',
'marketing',
'tasks',
'invoices',
'events',
'teams',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'crm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates"), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'crm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'dj_crm',
'USER': 'postgres',
'PASSWORD': 'root',
'HOST': os.getenv('DB_HOST', '127.0.0.1'),
'PORT': os.getenv('DB_PORT', '5432')
}
}
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static"), ]
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# EMAIL_HOST = 'localhost'
# EMAIL_PORT = 25
# AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend', )
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = os.getenv('SG_USER', '')
EMAIL_HOST_PASSWORD = os.getenv('SG_PWD', '')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
AUTH_USER_MODEL = 'common.User'
STORAGE_TYPE = os.getenv('STORAGE_TYPE', 'normal')
if STORAGE_TYPE == 'normal':
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATICFILES_DIRS = (BASE_DIR + '/static',)
COMPRESS_ROOT = BASE_DIR + '/static/'
elif STORAGE_TYPE == 's3-storage':
AWS_STORAGE_BUCKET_NAME = AWS_BUCKET_NAME = os.getenv('AWSBUCKETNAME', '')
AM_ACCESS_KEY = AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID', '')
AM_PASS_KEY = AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY', '')
S3_DOMAIN = AWS_S3_CUSTOM_DOMAIN = str(AWS_BUCKET_NAME) + '.s3.amazonaws.com'
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': 'max-age=86400',
}
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
DEFAULT_S3_PATH = "media"
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATIC_S3_PATH = "static"
COMPRESS_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter', 'compressor.filters.cssmin.CSSMinFilter']
COMPRESS_JS_FILTERS = ['compressor.filters.jsmin.JSMinFilter']
COMPRESS_REBUILD_TIMEOUT = 5400
MEDIA_ROOT = '/%s/' % DEFAULT_S3_PATH
MEDIA_URL = '//%s/%s/' % (S3_DOMAIN, DEFAULT_S3_PATH)
STATIC_ROOT = "/%s/" % STATIC_S3_PATH
STATIC_URL = 'https://%s/' % (S3_DOMAIN)
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
CORS_ORIGIN_ALLOW_ALL = True
AWS_IS_GZIPPED = True
AWS_ENABLED = True
AWS_S3_SECURE_URLS = True
COMPRESS_ROOT = BASE_DIR + '/static/'
COMPRESS_ENABLED = True
COMPRESS_OFFLINE_CONTEXT = {
'STATIC_URL': 'STATIC_URL',
}
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter', 'compressor.filters.cssmin.CSSMinFilter']
COMPRESS_REBUILD_TIMEOUT = 5400
COMPRESS_OUTPUT_DIR = 'CACHE'
COMPRESS_URL = STATIC_URL
COMPRESS_PRECOMPILERS = (
('text/less', 'lessc {infile} {outfile}'),
('text/x-sass', 'sass {infile} {outfile}'),
('text/x-scss', 'sass {infile} {outfile}'),
)
COMPRESS_OFFLINE_CONTEXT = {
'STATIC_URL': 'STATIC_URL',
}
DEFAULT_FROM_EMAIL = '[email protected]'
# celery Tasks
CELERY_BROKER_URL = 'redis://localhost:6379'
CELERY_RESULT_BACKEND = 'redis://localhost:6379'
CELERY_BEAT_SCHEDULE = {
"runs-campaign-for-every-thiry-minutes": {
"task": "marketing.tasks.run_all_campaigns",
"schedule": crontab(minute=30, hour='*')
},
"runs-campaign-for-every-five-minutes": {
"task": "marketing.tasks.list_all_bounces_unsubscribes",
"schedule": crontab(minute='*/5')
},
"runs-scheduled-campaigns-for-every-one-hour": {
"task": "marketing.tasks.send_scheduled_campaigns",
"schedule": crontab(hour='*/1')
},
"runs-scheduled-emails-for-accounts-every-five-minutes": {
"task": "accounts.tasks.send_scheduled_emails",
"schedule": crontab(minute='*/1')
}
}
MAIL_SENDER = 'AMAZON'
INACTIVE_MAIL_SENDER = 'MANDRILL'
AM_ACCESS_KEY = os.getenv('AM_ACCESS_KEY', '')
AM_PASS_KEY = os.getenv('AM_PASS_KEY', '')
AWS_REGION = os.getenv('AWS_REGION', '')
MGUN_API_URL = os.getenv('MGUN_API_URL', '')
MGUN_API_KEY = os.getenv('MGUN_API_KEY', '')
SG_USER = os.getenv('SG_USER', '')
SG_PWD = os.getenv('SG_PWD', '')
MANDRILL_API_KEY = os.getenv('MANDRILL_API_KEY', '')
ADMIN_EMAIL = "[email protected]"
URL_FOR_LINKS = "http://demo.django-crm.io"
try:
from .dev_settings import *
except ImportError:
pass
GP_CLIENT_ID = os.getenv('GP_CLIENT_ID', False)
GP_CLIENT_SECRET = os.getenv('GP_CLIENT_SECRET', False)
ENABLE_GOOGLE_LOGIN = os.getenv('ENABLE_GOOGLE_LOGIN', False)
MARKETING_REPLY_EMAIL = '[email protected]'
PASSWORD_RESET_TIMEOUT_DAYS = 3
SENTRY_ENABLED = os.getenv('SENTRY_ENABLED', False)
if SENTRY_ENABLED and not DEBUG:
if os.getenv('SENTRYDSN') is not None:
RAVEN_CONFIG = {
'dsn': os.getenv('SENTRYDSN', ''),
}
INSTALLED_APPS = INSTALLED_APPS + [
'raven.contrib.django.raven_compat',
]
MIDDLEWARE = [
'raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',
'raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware',
] + MIDDLEWARE
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
},
}
# Load the local settings file if it exists
if os.path.isfile('crm/local_settings.py'):
from .local_settings import *
else:
print("No local settings file found")
|
the-stack_0_15086 | #!/usr/bin/env python3
import os
import stat
def getFileList(fileDir):
fileList = os.listdir(fileDir)
newFileList = []
for eachFile in fileList:
if eachFile[:31] not in newFileList:
            newFileList.append(eachFile[:31])  # keep only the 31-char run prefix so duplicates are skipped
return newFileList
def createSubmitFile(eventRootName):
fileName = str(eventRootName).split("_")[4]
submitComtent = """Universe = vanilla
Notification = Error
Initialdir = /star/u/jhai/selfSC_GL/test/submit/submit6.8_9
Executable = $(Initialdir)/run47.csh
Arguments = $(Process)
Log = $(Initialdir)/log/job47_$(Process).log
Output = $(Initialdir)/log/job47_$(Process).out
Error = $(Initialdir)/log/job47_$(Process).err
GetEnv = True
+Job_Type = "cas"
Queue 1"""
    submitContentResult = submitContent.split("\n")
fileObject = open(fileName + ".con", "w")
    for eachLine in submitContentResult:
if eachLine.lower().startswith("executable"):
eachLine = "Executable = $(Initialdir)/" + fileName + ".csh"
fileObject.write(eachLine+'\n')
fileObject.close()
def createRunShell(eventRootName):
fileName = str(eventRootName).split("_")[4]
shellContent = """#!/bin/csh
stardev
root4star -b -q -l 'doEvents_SCGL_Calib.C(5000,"./output/*19072018_raw_5000008*")'
"""
shellContentResult = shellContent.split("\n")
fileObject = open(fileName + ".csh", "w")
for eachLine in shellContentResult:
eachLine.strip('\n')
if eachLine.lower().startswith("root4star"):
eachLine = """root4star -b -q -l 'doEvents_SCGL_Calib.C(5000,"/star/u/jhai/scratch/test/""" + eventRootName + "*" + """")'"""
fileObject.write(eachLine+"\n")
fileObject.close()
os.chmod(fileName + ".csh", stat.S_IRWXU)
def main():
fileDir = "/star/u/jhai/scratch/test"
eventFileList = getFileList(fileDir)
for eachEventFile in eventFileList:
eachEventFile = eachEventFile[:31]
createRunShell(eachEventFile)
createSubmitFile(eachEventFile)
if __name__ == "__main__":
main()
|
the-stack_0_15087 | import autofit as af
import autolens as al
from test_autolens.integration.tests.imaging import runner
test_type = "lens__source_inversion"
test_name = "lens_both__source_rectangular"
data_type = "lens_light__source_smooth"
data_resolution = "lsst"
def make_pipeline(name, phase_folders, optimizer_class=af.MultiNest):
class SourcePix(al.PhaseImaging):
def customize_priors(self, results):
self.galaxies.lens.mass.centre.centre_0 = 0.0
self.galaxies.lens.mass.centre.centre_1 = 0.0
self.galaxies.lens.mass.einstein_radius = 1.6
self.galaxies.source.pixelization.shape_0 = 20.0
self.galaxies.source.pixelization.shape_1 = 20.0
phase1 = SourcePix(
phase_name="phase_1",
phase_folders=phase_folders,
galaxies=dict(
lens=al.GalaxyModel(
redshift=0.5,
light=al.lp.SphericalDevVaucouleurs,
mass=al.mp.EllipticalIsothermal,
),
source=al.GalaxyModel(
redshift=1.0,
pixelization=al.pix.Rectangular,
regularization=al.reg.Constant,
),
),
optimizer_class=optimizer_class,
)
phase1.optimizer.const_efficiency_mode = True
phase1.optimizer.n_live_points = 60
phase1.optimizer.sampling_efficiency = 0.8
return al.PipelineDataset(name, phase1)
if __name__ == "__main__":
import sys
runner.run(sys.modules[__name__])
|
the-stack_0_15089 | from pymixconf.jsonconf import JSONConf
import os
def test_yamlconf():
loader = JSONConf(config_directory="test/fixtures", environment_key="TEST_ENV")
os.environ["TEST_ENV"] = "dev"
data = loader.load_config()
expected = {
"flask": {
"port": 7000
},
"logging": {
"level": "INFO"
},
"custom": {
"users": {
"enabled": True,
"admins": ["steve"]
}
}
}
print(data)
assert data == expected
|
the-stack_0_15090 | """
Defines repositories and register toolchains for versions of the tools built
from source
"""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
_ALL_CONTENT = """\
filegroup(
name = "all_srcs",
srcs = glob(["**"]),
visibility = ["//visibility:public"],
)
"""
# buildifier: disable=unnamed-macro
def built_toolchains(cmake_version, make_version, ninja_version):
"""Register toolchains for built tools that will be built from source"""
_cmake_toolchain(cmake_version)
_make_toolchain(make_version)
_ninja_toolchain(ninja_version)
def _cmake_toolchain(version):
native.register_toolchains(
"@rules_foreign_cc//toolchains:built_cmake_toolchain",
)
if version == "3.19.6":
maybe(
http_archive,
name = "cmake_src",
build_file_content = _ALL_CONTENT,
sha256 = "ec87ab67c45f47c4285f204280c5cde48e1c920cfcfed1555b27fb3b1a1d20ba",
strip_prefix = "cmake-3.19.6",
urls = [
"https://github.com/Kitware/CMake/releases/download/v3.19.6/cmake-3.19.6.tar.gz",
],
)
return
fail("Unsupported cmake version: " + str(version))
def _make_toolchain(version):
native.register_toolchains(
"@rules_foreign_cc//toolchains:built_make_toolchain",
)
if version == "4.3":
maybe(
http_archive,
name = "gnumake_src",
build_file_content = _ALL_CONTENT,
sha256 = "e05fdde47c5f7ca45cb697e973894ff4f5d79e13b750ed57d7b66d8defc78e19",
strip_prefix = "make-4.3",
urls = [
"http://ftpmirror.gnu.org/gnu/make/make-4.3.tar.gz",
],
)
return
fail("Unsupported make version: " + str(version))
def _ninja_toolchain(version):
native.register_toolchains(
"@rules_foreign_cc//toolchains:built_ninja_toolchain",
)
if version == "1.10.2":
maybe(
http_archive,
name = "ninja_build_src",
build_file_content = _ALL_CONTENT,
sha256 = "ce35865411f0490368a8fc383f29071de6690cbadc27704734978221f25e2bed",
strip_prefix = "ninja-1.10.2",
urls = [
"https://github.com/ninja-build/ninja/archive/v1.10.2.tar.gz",
],
)
return
fail("Unsupported ninja version: " + str(version))
|
the-stack_0_15091 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
import json
import logging
import os
import time
import dateutil.tz
METRICS_DIR = os.environ.get("SAGEMAKER_METRICS_DIRECTORY", ".")
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class SageMakerFileMetricsWriter(object):
"""Writes metric data to file."""
def __init__(self, metrics_file_path=None):
self._metrics_file_path = metrics_file_path
self._file = None
self._closed = False
def log_metric(self, metric_name, value, timestamp=None, iteration_number=None):
"""Write a metric to file.
Args:
metric_name (str): The name of the metric.
value (str): The value of the metric.
timestamp (datetime): Timestamp of the metric.
iteration_number (int): Iteration number of the metric.
Raises:
SageMakerMetricsWriterException: If the metrics file is closed.
"""
raw_metric_data = _RawMetricData(
metric_name=metric_name, value=value, timestamp=timestamp, iteration_number=iteration_number
)
try:
logging.debug("Writing metric: %s", raw_metric_data)
self._file.write(json.dumps(raw_metric_data.to_record()))
self._file.write("\n")
except AttributeError:
if self._closed:
raise SageMakerMetricsWriterException("log_metric called on a closed writer")
elif not self._file:
self._file = open(self._get_metrics_file_path(), "a", buffering=1)
self._file.write(json.dumps(raw_metric_data.to_record()))
self._file.write("\n")
else:
raise
def close(self):
"""Closes the metric file."""
if not self._closed and self._file:
self._file.close()
self._file = None # invalidate reference, causing subsequent log_metric to fail.
self._closed = True
def __enter__(self):
"""Return self"""
return self
def __exit__(self, type, value, traceback):
"""Execute self.close()"""
self.close()
def __del__(self):
"""Execute self.close()"""
self.close()
def _get_metrics_file_path(self):
pid_filename = "{}.json".format(str(os.getpid()))
metrics_file_path = self._metrics_file_path or os.path.join(METRICS_DIR, pid_filename)
logging.debug("metrics_file_path=" + metrics_file_path)
return metrics_file_path
class SageMakerMetricsWriterException(Exception):
"""SageMakerMetricsWriterException"""
def __init__(self, message, errors=None):
super().__init__(message)
if errors:
self.errors = errors
class _RawMetricData(object):
MetricName = None
Value = None
Timestamp = None
IterationNumber = None
def __init__(self, metric_name, value, timestamp=None, iteration_number=None):
if timestamp is None:
timestamp = time.time()
elif isinstance(timestamp, datetime.datetime):
# If the input is a datetime then convert it to UTC time. Assume a naive datetime is in local timezone
if not timestamp.tzinfo:
timestamp = timestamp.replace(tzinfo=dateutil.tz.tzlocal())
timestamp = (timestamp - timestamp.utcoffset()).replace(tzinfo=datetime.timezone.utc)
timestamp = timestamp.timestamp()
else:
timestamp = float(timestamp)
if timestamp < (time.time() - 1209600) or timestamp > (time.time() + 7200):
raise ValueError(
"Supplied timestamp %f is invalid."
" Timestamps must be between two weeks before and two hours from now." % timestamp
)
value = float(value)
self.MetricName = metric_name
self.Value = float(value)
self.Timestamp = timestamp
if iteration_number is not None:
assert isinstance(iteration_number, int)
self.IterationNumber = iteration_number
def to_record(self):
return self.__dict__
def __str__(self):
return repr(self)
def __repr__(self):
return "{}({})".format(
type(self).__name__,
",".join(["{}={}".format(k, repr(v)) for k, v in vars(self).items()]),
)
|
the-stack_0_15092 | from igramscraper.instagram import Instagram
import urllib.request
import argparse
import os
def get_media_from_hashtag(tag, media_type, quality, max_images, path):
instagram = Instagram()
medias = instagram.get_medias_by_tag(tag, count=max_images)
count = 1
for media in medias:
media.type = 'image' if media.type == 'sidecar' or media.type == 'carousel' else media.type
# Extracting Image URL
if (media.type == 'image' and media_type == 'image' or media_type == 'all') and not media.is_ad:
            # Get the links from the media
all_quality = ['low', 'standard', 'high']
url = media.__getattribute__(f"image_{quality}_resolution_url")
# If the preferred quality is not available
if not url:
all_quality.remove(quality)
for q in all_quality:
url = media.__getattribute__(
f"image_{q}_resolution_url")
if url:
break
# Extracting Video URL
if (media.type == 'video' and media_type == 'all' or media_type == 'video') and not media.is_ad:
            # Get the links from the media
media = instagram.get_media_by_id(media.identifier)
url = media.video_standard_resolution_url or media.video_low_bandwidth_url or media.video_low_resolution_url or media.video_url
# Downloading the media
if url:
urllib.request.urlretrieve(
url, f"{path}/{media.type}s/{media.type}{count}.{'jpg' if media.type == 'image' else 'mp4'}")
print(f"{count}/{max_images} media downloaded")
else:
print(
f"[{count}] Failed downloading the media {media.link} (id - {media.identifier})")
count += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Get All Post From Instagram Hashtag')
parser.add_argument('-t', '--tag', required=True, help="valid tag name")
parser.add_argument('-p', '--path', required=False,
help="Path to save media", default="media")
parser.add_argument('-mm', '--max-media', required=False,
help="Max number of media to download", type=int, default=10)
parser.add_argument('-mt', '--media-type', required=False,
help="For Photos => `image` Videos => `video` All => `all` ", default="all")
parser.add_argument('-q', '--quality', required=False,
help="Media Quality Use either of `low`, `standard` or `high`", default="standard")
arguments = parser.parse_args()
# Checking
if arguments.media_type not in ["video", "image", "all"]:
raise ValueError("Media Type should be either videos, images or all")
if arguments.quality not in ["low", "high", "standard"]:
raise ValueError("Quality should be either low, standard or high")
if not os.path.exists(arguments.path):
print("Media path not found! \nCreating media path!")
os.mkdir(arguments.path)
if not os.path.exists(arguments.path + "/images"):
os.mkdir(arguments.path + "/images")
if not os.path.exists(arguments.path + "/videos"):
os.mkdir(arguments.path + "/videos")
# Running
get_media_from_hashtag(tag=arguments.tag, media_type=arguments.media_type,
quality=arguments.quality, max_images=arguments.max_media, path=arguments.path)
|
the-stack_0_15093 | from cricsheet.io_xml.parsers.parser import Parser
class MatchParser(Parser):
def __init__(self, match_id):
self.match_id = match_id
self.metadata_parser = MatchMetadataParser()
self.outcome_parser = MatchOutcomeParser()
self.umpire_parser = UmpireParser()
def parse(self, raw):
data = {'id': self.match_id}
data.update(self.metadata_parser.parse(raw))
data.update(self.outcome_parser.parse(raw['outcome']))
if 'umpires' in raw:
data.update(self.umpire_parser.parse(raw['umpires']['umpire']))
return data
class MatchMetadataParser(Parser):
def __init__(self):
pass
def parse(self, metadata):
if 'player_of_match' in metadata:
if type(metadata['player_of_match']['player_of_match']) == list:
player_of_match = metadata['player_of_match']['player_of_match'][0]
else:
player_of_match = metadata['player_of_match']['player_of_match']
else:
player_of_match = None
return {
'gender': metadata['gender'],
'match_type': metadata['match_type'],
'competition': metadata['competition'] if 'competition' in metadata else None,
'max_overs': metadata['overs'] if 'overs' in metadata else None,
'venue': metadata['venue'] if 'venue' in metadata else None,
'city': metadata['city'] if 'city' in metadata else None,
'start_date': metadata['dates']['date'][0],
'end_date': metadata['dates']['date'][-1],
'team_home': metadata['teams']['team'][0],
'team_away': metadata['teams']['team'][1],
'player_of_match': player_of_match,
'toss_won_by': metadata['toss']['winner'],
'toss_decision': metadata['toss']['decision']
}
class MatchOutcomeParser(Parser):
def __init__(self):
pass
def parse(self, outcome):
has_winner = (any(result in outcome for result in ('winner', 'eliminator')))
result = 'win' if has_winner else outcome['result']
method = outcome['method'] if 'method' in outcome else \
'eliminator' if 'eliminator' in outcome else None
winner = outcome['winner'] if 'winner' in outcome else \
outcome['eliminator'] if 'eliminator' in outcome else None
by = outcome['by'] if 'by' in outcome else None
#print(has_winner, result, method, winner, by)
if (not has_winner) or ('eliminator' in outcome) or (method == 'Awarded'):
won_by_type = None
won_by_value = None
elif by is not None:
if ('innings' in by) and ('wickets' in by):
won_by_type = 'innings_and_wickets'
won_by_value = by['wickets']
elif ('innings' not in by) and ('wickets' in by):
won_by_type = 'wickets'
won_by_value = by['wickets']
elif ('innings' in by) and ('runs' in by):
won_by_type = 'innings_and_runs'
won_by_value = by['runs']
elif ('innings' not in by) and ('runs' in by):
won_by_type = 'runs'
won_by_value = by['runs']
else:
won_by_type = None
won_by_value = None
return {
'result': result,
'method': method,
'winner': winner,
'won_by_type': won_by_type,
'won_by_value': won_by_value
}
class UmpireParser(Parser):
def __init__(self):
pass
def parse(self, umpires):
if len(umpires) == 2:
first, second = umpires
third, forth = None, None
if len(umpires) == 3:
first, second, third = umpires
forth = None
if len(umpires) == 4:
first, second, third, forth = umpires
return {
'umpire_first': first,
'umpire_second': second,
'umpire_third': third,
'umpire_forth': forth
}
|
the-stack_0_15095 | """Remove processed notebooks from disk"""
import argparse
import shutil
from reproducemegit.jupyter_reproducibility import config
import os
from reproducemegit.jupyter_reproducibility import consts
from reproducemegit.jupyter_reproducibility.db import Repository, Notebook, connect
from reproducemegit.jupyter_reproducibility.utils import vprint, StatusLogger, mount_basedir, check_exit, savepid
def apply(session, status, keep, count, interval, reverse, check):
"""Compress repositories"""
filters = [
Repository.processed.op('&')(consts.R_COMPRESS_OK) == 0,
]
if interval:
filters += [
Repository.id >= interval[0],
Repository.id <= interval[1],
]
query = session.query(Repository).filter(*filters)
if count:
print(query.count())
return
if reverse:
query = query.order_by(
Repository.id.desc()
)
else:
query = query.order_by(
Repository.id.asc()
)
for repository in query:
if check_exit(check):
vprint(0, "Found .exit file. Exiting")
return
status.report()
vprint(0, "Compressing {}".format(repository))
vprint(1, "Into {}".format(repository.zip_path))
with mount_basedir():
try:
if repository.path.exists():
commit = repository.get_commit()
if commit != repository.commit:
repository.processed |= consts.R_COMMIT_MISMATCH
repository.processed |= consts.R_COMPRESS_ERROR
if repository.zip_path.exists() or repository.compress():
if repository.processed & consts.R_COMPRESS_ERROR:
repository.processed -= consts.R_COMPRESS_ERROR
if not keep:
shutil.rmtree(str(repository.path), ignore_errors=True)
elif not repository.zip_path.exists():
if repository.processed & consts.R_COMPRESS_ERROR:
repository.processed -= consts.R_COMPRESS_ERROR
if not repository.path.exists():
repository.processed |= consts.R_UNAVAILABLE_FILES
vprint(1, "failed")
if repository.zip_path.exists():
vprint(1, "ok")
repository.processed |= consts.R_COMPRESS_OK
except Exception as err:
vprint(1, "Failed: {}".format(err))
session.add(repository)
status.count += 1
session.commit()
def main():
"""Main function"""
script_name = os.path.basename(__file__)[:-3]
parser = argparse.ArgumentParser(
description="Compress processed repositories")
parser.add_argument("-v", "--verbose", type=int, default=config.VERBOSE,
help="increase output verbosity")
parser.add_argument("-z", "--compression", type=str,
default=config.COMPRESSION,
help="compression algorithm")
parser.add_argument("-e", "--retry-errors", action='store_true',
help="retry errors")
parser.add_argument("-i", "--interval", type=int, nargs=2,
default=config.REPOSITORY_INTERVAL,
help="id interval")
parser.add_argument("-c", "--count", action='store_true',
help="count results")
parser.add_argument('-r', '--reverse', action='store_true',
help='iterate in reverse order')
parser.add_argument('-k', '--keep-uncompressed', action='store_true',
help='keep uncompressed files')
parser.add_argument('--check', type=str, nargs='*',
default={'all', script_name, script_name + '.py'},
help='check name in .exit')
args = parser.parse_args()
config.VERBOSE = args.verbose
status = None
if not args.count:
status = StatusLogger(script_name)
status.report()
config.COMPRESSION = args.compression
with connect() as session, savepid():
apply(
session,
status,
args.keep_uncompressed,
args.count,
args.interval,
args.reverse,
set(args.check)
)
if __name__ == "__main__":
main()
|
the-stack_0_15096 | # Lab 4 Multi-variable linear regression
import tensorflow as tf
import numpy as np
tf.set_random_seed(777) # for reproducibility
xy = np.loadtxt('data-01-test-score.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]
# Make sure the shape and data are OK
print(x_data.shape, x_data, len(x_data))
print(y_data.shape, y_data)
# placeholders for a tensor that will be always fed.
X = tf.placeholder(tf.float32, shape=[None, 3])
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([3, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
# Hypothesis
hypothesis = tf.matmul(X, W) + b
# Simplified cost function
cost = tf.reduce_mean(tf.square(hypothesis - Y))
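# i.e. mean squared error: cost = (1/m) * sum_i (x_i . W + b - y_i)^2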
# Minimize
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)
# Launch the graph in a session.
sess = tf.Session()
# Initializes global variables in the graph.
sess.run(tf.global_variables_initializer())
for step in range(2001):
feed = {X: x_data, Y: y_data}
sess.run(train, feed_dict=feed)
if step % 10 == 0:
print(step, "Cost: ", sess.run(cost, feed_dict=feed),
"\nPrediction:\n", sess.run(hypothesis, feed_dict=feed))
# Ask my score
score = np.array([[100, 70, 101]])
print("Your score will be ", sess.run(hypothesis, feed_dict={X: score}))
score = np.array([[60, 70, 110], [90, 100, 80]])
print("Other scores will be ", sess.run(hypothesis, feed_dict={X: score}))
'''
Your score will be [[ 181.73277283]]
Other scores will be [[ 145.86265564]
[ 187.23129272]]
'''
|
the-stack_0_15097 | import os
import subprocess
import glob
import hashlib
import shutil
from common.basedir import BASEDIR
from selfdrive.swaglog import cloudlog
android_packages = ("ai.comma.plus.offroad",)
def get_installed_apks():
dat = subprocess.check_output(["pm", "list", "packages", "-f"], encoding='utf8').strip().split("\n")
ret = {}
for x in dat:
if x.startswith("package:"):
v, k = x.split("package:")[1].split("=")
ret[k] = v
return ret
def install_apk(path):
# can only install from world readable path
install_path = "/sdcard/%s" % os.path.basename(path)
shutil.copyfile(path, install_path)
ret = subprocess.call(["pm", "install", "-r", install_path])
os.remove(install_path)
return ret == 0
def start_offroad():
set_package_permissions()
system("am start -n ai.comma.plus.offroad/.MainActivity")
def set_package_permissions():
try:
output = subprocess.check_output(['dumpsys', 'package', 'ai.comma.plus.offroad'], encoding="utf-8")
given_permissions = output.split("runtime permissions")[1]
except Exception:
given_permissions = ""
wanted_permissions = ["ACCESS_FINE_LOCATION", "READ_PHONE_STATE", "READ_EXTERNAL_STORAGE"]
for permission in wanted_permissions:
if permission not in given_permissions:
pm_grant("ai.comma.plus.offroad", "android.permission." + permission)
appops_set("ai.comma.plus.offroad", "SU", "allow")
appops_set("ai.comma.plus.offroad", "WIFI_SCAN", "allow")
def appops_set(package, op, mode):
system(f"LD_LIBRARY_PATH= appops set {package} {op} {mode}")
def pm_grant(package, permission):
system(f"pm grant {package} {permission}")
def system(cmd):
try:
cloudlog.info("running %s" % cmd)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
cloudlog.event("running failed",
cmd=e.cmd,
output=e.output[-1024:],
returncode=e.returncode)
# *** external functions ***
def update_apks():
# install apks
installed = get_installed_apks()
install_apks = glob.glob(os.path.join(BASEDIR, "apk/*.apk"))
for apk in install_apks:
app = os.path.basename(apk)[:-4]
if app not in installed:
installed[app] = None
cloudlog.info("installed apks %s" % (str(installed), ))
for app in installed.keys():
apk_path = os.path.join(BASEDIR, "apk/"+app+".apk")
if not os.path.exists(apk_path):
continue
h1 = hashlib.sha1(open(apk_path, 'rb').read()).hexdigest()
h2 = None
if installed[app] is not None:
h2 = hashlib.sha1(open(installed[app], 'rb').read()).hexdigest()
cloudlog.info("comparing version of %s %s vs %s" % (app, h1, h2))
if h2 is None or h1 != h2:
cloudlog.info("installing %s" % app)
success = install_apk(apk_path)
if not success:
cloudlog.info("needing to uninstall %s" % app)
system("pm uninstall %s" % app)
success = install_apk(apk_path)
assert success
def pm_apply_packages(cmd):
for p in android_packages:
system("pm %s %s" % (cmd, p))
if __name__ == "__main__":
update_apks()
|
the-stack_0_15100 | import os
import argparse
import pandas as pd
from typing import Dict
from utils import parse_date, load_colnames
from parsers import (
confirmados_diarios_por_estado,
negativos_diarios_por_estado,
pruebas_pendientes_diarias_por_estado,
defunciones_diarias_por_estado,
hospitalizados_diarios_por_estado,
uci_diarios_por_estado
)
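# func_dict maps each output CSV filename to the parser that builds its per-state table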
func_dict = dict()
func_dict['covid19_mex_confirmed.csv'] = confirmados_diarios_por_estado
func_dict['covid19_mex_negative.csv'] = negativos_diarios_por_estado
func_dict['covid19_mex_awaiting.csv'] = pruebas_pendientes_diarias_por_estado
func_dict['covid19_mex_deceased.csv'] = defunciones_diarias_por_estado
func_dict['covid19_mex_hospitalised.csv'] = hospitalizados_diarios_por_estado
func_dict['covid19_mex_icu.csv'] = uci_diarios_por_estado
def write_files(main_df: pd.DataFrame, colnames: Dict[str, str], data_dir: str):
for key, func in func_dict.items():
df = func(main_df, colnames)
filename = os.path.join(data_dir, key)
df.to_csv(filename)
return None
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='parse main dataset (zip file)')
parser.add_argument('input_file', help='file containing dataset')
parser.add_argument('-d', '--date', type=str, default=None,
help="specify the date to use as yyyymmdd")
args = parser.parse_args()
date_filename, date_iso = parse_date(args)
data_dir = os.path.join(os.pardir, 'data')
# main dataset
input_file = args.input_file
assert input_file.endswith(f'{date_filename}.zip')
try:
main_df = pd.read_csv(input_file, compression='zip')
colnames_dict = load_colnames('catalogo_entidades.csv') # names of 32 states
write_files(main_df, colnames_dict, data_dir)
print(f'Successfully parsed datos_abiertos_{date_filename}.zip')
except FileNotFoundError:
print('ERROR: Wrong date or missing file')
|
the-stack_0_15103 | import numpy as np
import os
import cv2
def imread(file_path, c=None):
if c is None:
im = cv2.imread(file_path)
else:
im = cv2.imread(file_path, c)
if im is None:
        raise ValueError('Cannot read image: ' + file_path)
if im.ndim == 3 and im.shape[2] == 3:
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
return im
def imwrite(file_path, image):
if image.ndim == 3 and image.shape[2] == 3:
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imwrite(file_path, image)
def fold_dir(folder):
if not os.path.exists(folder):
os.makedirs(folder)
return folder
def get_mask_BZ(img):
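    # Rough intent: threshold the grayscale image to get a foreground mask,
    # use corner flood fills to try to close holes not connected to the image
    # border, then erode/dilate the result to smooth it.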
if img.ndim==3:
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
gray_img = img
    threshold = np.mean(gray_img)/3 - 5
    #cv2.imshow('gray_img', gray_img)
    #cv2.waitKey()
    #print(threshold)
    _, mask = cv2.threshold(gray_img, max(5, threshold), 1, cv2.THRESH_BINARY)
#cv2.imshow('bz_mask', mask*255)
#cv2.waitKey()
nn_mask = np.zeros((mask.shape[0]+2,mask.shape[1]+2),np.uint8)
new_mask = (1-mask).astype(np.uint8)
# cv::floodFill(Temp, Point(0, 0), Scalar(255));
# _,new_mask,_,_ = cv2.floodFill(new_mask, nn_mask, [(0, 0),(0,new_mask.shape[0])], (0), cv2.FLOODFILL_MASK_ONLY)
_,new_mask,_,_ = cv2.floodFill(new_mask, nn_mask, (0,0), (0), cv2.FLOODFILL_MASK_ONLY)
_,new_mask,_,_ = cv2.floodFill(new_mask, nn_mask, (new_mask.shape[1]-1,new_mask.shape[0]-1), (0), cv2.FLOODFILL_MASK_ONLY)
mask = mask + new_mask
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 20))
mask = cv2.erode(mask, kernel)
mask = cv2.dilate(mask, kernel)
return mask
def _get_center_by_edge(mask):
center=[0,0]
x=mask.sum(axis=1)
center[0]=np.where(x>x.max()*0.95)[0].mean()
x=mask.sum(axis=0)
center[1]=np.where(x>x.max()*0.95)[0].mean()
return center
def _get_radius_by_mask_center(mask,center):
mask=mask.astype(np.uint8)
ksize=max(mask.shape[1]//400*2+1,3)
kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(ksize,ksize))
mask=cv2.morphologyEx(mask, cv2.MORPH_GRADIENT, kernel)
# radius=
index=np.where(mask>0)
d_int=np.sqrt((index[0]-center[0])**2+(index[1]-center[1])**2)
b_count=np.bincount(np.ceil(d_int).astype(np.int))
radius=np.where(b_count>b_count.max()*0.995)[0].max()
return radius
def _get_circle_by_center_bbox(shape,center,bbox,radius):
center_mask=np.zeros(shape=shape).astype('uint8')
tmp_mask=np.zeros(shape=bbox[2:4])
center_tmp=(int(center[0]),int(center[1]))
center_mask=cv2.circle(center_mask,center_tmp[::-1],int(radius),(1),-1)
# center_mask[bbox[0]:bbox[0]+bbox[2],bbox[1]:bbox[1]+bbox[3]]=tmp_mask
# center_mask[bbox[0]:min(bbox[0]+bbox[2],center_mask.shape[0]),bbox[1]:min(bbox[1]+bbox[3],center_mask.shape[1])]=tmp_mask
return center_mask
def get_mask(img):
if img.ndim ==3:
#raise 'image dim is not 3'
g_img=cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
#cv2.imshow('ImageWindow', g_img)
#cv2.waitKey()
elif img.ndim == 2:
g_img =img.copy()
else:
        raise ValueError('image dim is not 1 or 3')
h,w = g_img.shape
shape=g_img.shape[0:2]
#g_img = cv2.resize(g_img,(0,0),fx = 0.5,fy = 0.5)
tg_img=cv2.normalize(g_img, None, 0, 255, cv2.NORM_MINMAX)
tmp_mask=get_mask_BZ(tg_img)
center=_get_center_by_edge(tmp_mask)
#bbox=_get_bbox_by_mask(tmp_mask)
#print(center)
#cv2.imshow('ImageWindow', tmp_mask*255)
#cv2.waitKey()
radius=_get_radius_by_mask_center(tmp_mask,center)
#resize back
#center = [center[0]*2,center[1]*2]
#radius = int(radius*2)
center = [center[0], center[1]]
radius = int(radius)
s_h = max(0,int(center[0] - radius))
s_w = max(0, int(center[1] - radius))
bbox = (s_h, s_w, min(h-s_h,2 * radius), min(w-s_w,2 * radius))
tmp_mask=_get_circle_by_center_bbox(shape,center,bbox,radius)
return tmp_mask,bbox,center,radius
def mask_image(img,mask):
img[mask<=0,...]=0
return img
def remove_back_area(img,bbox=None,border=None):
image=img
if border is None:
border=np.array((bbox[0],bbox[0]+bbox[2],bbox[1],bbox[1]+bbox[3],img.shape[0],img.shape[1]),dtype=np.int)
image=image[border[0]:border[1],border[2]:border[3],...]
return image,border
def supplemental_black_area(img,border=None):
image=img
if border is None:
h,v=img.shape[0:2]
max_l=max(h,v)
if image.ndim>2:
image=np.zeros(shape=[max_l,max_l,img.shape[2]],dtype=img.dtype)
else:
image=np.zeros(shape=[max_l,max_l],dtype=img.dtype)
border=(int(max_l/2-h/2),int(max_l/2-h/2)+h,int(max_l/2-v/2),int(max_l/2-v/2)+v,max_l)
else:
max_l=border[4]
if image.ndim>2:
image=np.zeros(shape=[max_l,max_l,img.shape[2]],dtype=img.dtype)
else:
image=np.zeros(shape=[max_l,max_l],dtype=img.dtype)
image[border[0]:border[1],border[2]:border[3],...]=img
return image,border
def process_without_gb(img, label,radius_list,centre_list_w, centre_list_h):
    # Preprocess an image: mask out the background, crop to the circular
    # foreground region, and pad the result to a square.
    #   img   : original image
    #   label : label image, cropped and padded in the same way as img
    # returns:
    #   r_img, borders, mask, label, radius_list, centre_list_w, centre_list_h
    #   (borders holds the crop border and the square-padding border; mask is
    #    the foreground mask of the preprocessed image scaled to 0-255)
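    # Illustrative call (assuming img and label are same-sized numpy images):
    #   r_img, borders, mask, label, radii, cw, ch = process_without_gb(img, label, [], [], [])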
borders = []
mask, bbox, center, radius = get_mask(img)
#print('center is: ',center)
#print('radius is: ',radius)
r_img = mask_image(img, mask)
r_img, r_border = remove_back_area(r_img,bbox=bbox)
mask, _ = remove_back_area(mask,border=r_border)
label, _ = remove_back_area(label,bbox=bbox)
borders.append(r_border)
r_img,sup_border = supplemental_black_area(r_img)
#print(r_img.shape)
label,sup_border = supplemental_black_area(label)
mask,_ = supplemental_black_area(mask,border=sup_border)
borders.append(sup_border)
radius_list.append(radius)
centre_list_w.append(int(center[0]))
centre_list_h.append(int(center[1]))
return r_img,borders,(mask*255).astype(np.uint8),label, radius_list,centre_list_w, centre_list_h
|
the-stack_0_15105 | import tty
import sys
import curses
import datetime
import locale
from decimal import Decimal
import getpass
import electrum
from electrum.util import format_satoshis, set_verbosity
from electrum.bitcoin import is_address, COIN, TYPE_ADDRESS
from electrum.transaction import TxOutput
from electrum.wallet import Wallet
from electrum.storage import WalletStorage
from electrum.network import NetworkParameters
from electrum.interface import deserialize_server
_ = lambda x:x
class ElectrumGui:
def __init__(self, config, daemon, plugins):
self.config = config
self.network = daemon.network
storage = WalletStorage(config.get_wallet_path())
if not storage.file_exists():
print("Wallet not found. try 'electrum create'")
exit()
if storage.is_encrypted():
password = getpass.getpass('Password:', stream=None)
storage.decrypt(password)
self.wallet = Wallet(storage)
self.wallet.start_network(self.network)
self.contacts = self.wallet.contacts
locale.setlocale(locale.LC_ALL, '')
self.encoding = locale.getpreferredencoding()
self.stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_CYAN)
curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE)
self.stdscr.keypad(1)
self.stdscr.border(0)
self.maxy, self.maxx = self.stdscr.getmaxyx()
self.set_cursor(0)
self.w = curses.newwin(10, 50, 5, 5)
set_verbosity(False)
self.tab = 0
self.pos = 0
self.popup_pos = 0
self.str_recipient = ""
self.str_description = ""
self.str_amount = ""
self.str_fee = ""
self.history = None
if self.network:
self.network.register_callback(self.update, ['wallet_updated', 'network_updated'])
self.tab_names = [_("History"), _("Send"), _("Receive"), _("Addresses"), _("Contacts"), _("Banner")]
self.num_tabs = len(self.tab_names)
def set_cursor(self, x):
try:
curses.curs_set(x)
except Exception:
pass
def restore_or_create(self):
pass
def verify_seed(self):
pass
def get_string(self, y, x):
self.set_cursor(1)
curses.echo()
self.stdscr.addstr( y, x, " "*20, curses.A_REVERSE)
s = self.stdscr.getstr(y,x)
curses.noecho()
self.set_cursor(0)
return s
def update(self, event):
self.update_history()
if self.tab == 0:
self.print_history()
self.refresh()
def print_history(self):
width = [20, 40, 14, 14]
delta = (self.maxx - sum(width) - 4)/3
format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
if self.history is None:
self.update_history()
self.print_list(self.history[::-1], format_str%( _("Date"), _("Description"), _("Amount"), _("Balance")))
def update_history(self):
width = [20, 40, 14, 14]
delta = (self.maxx - sum(width) - 4)/3
format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
b = 0
self.history = []
for tx_hash, tx_mined_status, value, balance in self.wallet.get_history():
if tx_mined_status.conf:
timestamp = tx_mined_status.timestamp
try:
time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
except Exception:
time_str = "------"
else:
time_str = 'unconfirmed'
label = self.wallet.get_label(tx_hash)
if len(label) > 40:
label = label[0:37] + '...'
self.history.append( format_str%( time_str, label, format_satoshis(value, whitespaces=True), format_satoshis(balance, whitespaces=True) ) )
def print_balance(self):
if not self.network:
msg = _("Offline")
elif self.network.is_connected():
if not self.wallet.up_to_date:
msg = _("Synchronizing...")
else:
c, u, x = self.wallet.get_balance()
msg = _("Balance")+": %f "%(Decimal(c) / COIN)
if u:
msg += " [%f unconfirmed]"%(Decimal(u) / COIN)
if x:
msg += " [%f unmatured]"%(Decimal(x) / COIN)
else:
msg = _("Not connected")
self.stdscr.addstr( self.maxy -1, 3, msg)
for i in range(self.num_tabs):
self.stdscr.addstr( 0, 2 + 2*i + len(''.join(self.tab_names[0:i])), ' '+self.tab_names[i]+' ', curses.A_BOLD if self.tab == i else 0)
self.stdscr.addstr(self.maxy -1, self.maxx-30, ' '.join([_("Settings"), _("Network"), _("Quit")]))
def print_receive(self):
addr = self.wallet.get_receiving_address()
self.stdscr.addstr(2, 1, "Address: "+addr)
self.print_qr(addr)
def print_contacts(self):
messages = map(lambda x: "%20s %45s "%(x[0], x[1][1]), self.contacts.items())
self.print_list(messages, "%19s %15s "%("Key", "Value"))
def print_addresses(self):
fmt = "%-35s %-30s"
messages = map(lambda addr: fmt % (addr, self.wallet.labels.get(addr,"")), self.wallet.get_addresses())
self.print_list(messages, fmt % ("Address", "Label"))
def print_edit_line(self, y, label, text, index, size):
text += " "*(size - len(text) )
self.stdscr.addstr( y, 2, label)
self.stdscr.addstr( y, 15, text, curses.A_REVERSE if self.pos%6==index else curses.color_pair(1))
def print_send_tab(self):
self.stdscr.clear()
self.print_edit_line(3, _("Pay to"), self.str_recipient, 0, 40)
self.print_edit_line(5, _("Description"), self.str_description, 1, 40)
self.print_edit_line(7, _("Amount"), self.str_amount, 2, 15)
self.print_edit_line(9, _("Fee"), self.str_fee, 3, 15)
self.stdscr.addstr( 12, 15, _("[Send]"), curses.A_REVERSE if self.pos%6==4 else curses.color_pair(2))
self.stdscr.addstr( 12, 25, _("[Clear]"), curses.A_REVERSE if self.pos%6==5 else curses.color_pair(2))
self.maxpos = 6
def print_banner(self):
if self.network and self.network.banner:
banner = self.network.banner
banner = banner.replace('\r', '')
self.print_list(banner.split('\n'))
def print_qr(self, data):
import qrcode
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
s = StringIO()
self.qr = qrcode.QRCode()
self.qr.add_data(data)
self.qr.print_ascii(out=s, invert=False)
msg = s.getvalue()
lines = msg.split('\n')
try:
for i, l in enumerate(lines):
l = l.encode("utf-8")
self.stdscr.addstr(i+5, 5, l, curses.color_pair(3))
except curses.error:
m = 'error. screen too small?'
m = m.encode(self.encoding)
self.stdscr.addstr(5, 1, m, 0)
def print_list(self, lst, firstline = None):
lst = list(lst)
self.maxpos = len(lst)
if not self.maxpos: return
if firstline:
firstline += " "*(self.maxx -2 - len(firstline))
self.stdscr.addstr( 1, 1, firstline )
for i in range(self.maxy-4):
msg = lst[i] if i < len(lst) else ""
msg += " "*(self.maxx - 2 - len(msg))
m = msg[0:self.maxx - 2]
m = m.encode(self.encoding)
self.stdscr.addstr( i+2, 1, m, curses.A_REVERSE if i == (self.pos % self.maxpos) else 0)
def refresh(self):
if self.tab == -1: return
self.stdscr.border(0)
self.print_balance()
self.stdscr.refresh()
def main_command(self):
c = self.stdscr.getch()
print(c)
cc = curses.unctrl(c).decode()
if c == curses.KEY_RIGHT: self.tab = (self.tab + 1)%self.num_tabs
elif c == curses.KEY_LEFT: self.tab = (self.tab - 1)%self.num_tabs
elif c == curses.KEY_DOWN: self.pos +=1
elif c == curses.KEY_UP: self.pos -= 1
elif c == 9: self.pos +=1 # tab
elif cc in ['^W', '^C', '^X', '^Q']: self.tab = -1
elif cc in ['^N']: self.network_dialog()
elif cc == '^S': self.settings_dialog()
else: return c
if self.pos<0: self.pos=0
if self.pos>=self.maxpos: self.pos=self.maxpos - 1
def run_tab(self, i, print_func, exec_func):
while self.tab == i:
self.stdscr.clear()
print_func()
self.refresh()
c = self.main_command()
if c: exec_func(c)
def run_history_tab(self, c):
if c == 10:
out = self.run_popup('',["blah","foo"])
def edit_str(self, target, c, is_num=False):
# detect backspace
cc = curses.unctrl(c).decode()
if c in [8, 127, 263] and target:
target = target[:-1]
elif not is_num or cc in '0123456789.':
target += cc
return target
def run_send_tab(self, c):
if self.pos%6 == 0:
self.str_recipient = self.edit_str(self.str_recipient, c)
if self.pos%6 == 1:
self.str_description = self.edit_str(self.str_description, c)
if self.pos%6 == 2:
self.str_amount = self.edit_str(self.str_amount, c, True)
elif self.pos%6 == 3:
self.str_fee = self.edit_str(self.str_fee, c, True)
elif self.pos%6==4:
if c == 10: self.do_send()
elif self.pos%6==5:
if c == 10: self.do_clear()
def run_receive_tab(self, c):
if c == 10:
out = self.run_popup('Address', ["Edit label", "Freeze", "Prioritize"])
def run_contacts_tab(self, c):
if c == 10 and self.contacts:
out = self.run_popup('Address', ["Copy", "Pay to", "Edit label", "Delete"]).get('button')
key = list(self.contacts.keys())[self.pos%len(self.contacts.keys())]
if out == "Pay to":
self.tab = 1
self.str_recipient = key
self.pos = 2
elif out == "Edit label":
s = self.get_string(6 + self.pos, 18)
if s:
self.wallet.labels[key] = s
def run_banner_tab(self, c):
self.show_message(repr(c))
pass
def main(self):
tty.setraw(sys.stdin)
try:
while self.tab != -1:
self.run_tab(0, self.print_history, self.run_history_tab)
self.run_tab(1, self.print_send_tab, self.run_send_tab)
self.run_tab(2, self.print_receive, self.run_receive_tab)
self.run_tab(3, self.print_addresses, self.run_banner_tab)
self.run_tab(4, self.print_contacts, self.run_contacts_tab)
self.run_tab(5, self.print_banner, self.run_banner_tab)
except curses.error as e:
raise Exception("Error with curses. Is your screen too small?") from e
finally:
tty.setcbreak(sys.stdin)
curses.nocbreak()
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
def do_clear(self):
self.str_amount = ''
self.str_recipient = ''
self.str_fee = ''
self.str_description = ''
def do_send(self):
if not is_address(self.str_recipient):
self.show_message(_('Invalid MUE address'))
return
try:
amount = int(Decimal(self.str_amount) * COIN)
except Exception:
self.show_message(_('Invalid Amount'))
return
try:
fee = int(Decimal(self.str_fee) * COIN)
except Exception:
self.show_message(_('Invalid Fee'))
return
if self.wallet.has_password():
password = self.password_dialog()
if not password:
return
else:
password = None
try:
tx = self.wallet.mktx([TxOutput(TYPE_ADDRESS, self.str_recipient, amount)],
password, self.config, fee)
except Exception as e:
self.show_message(str(e))
return
if self.str_description:
self.wallet.labels[tx.txid()] = self.str_description
self.show_message(_("Please wait..."), getchar=False)
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except Exception as e:
self.show_message(repr(e))
else:
self.show_message(_('Payment sent.'))
self.do_clear()
#self.update_contacts_tab()
def show_message(self, message, getchar = True):
w = self.w
w.clear()
w.border(0)
for i, line in enumerate(message.split('\n')):
w.addstr(2+i,2,line)
w.refresh()
if getchar: c = self.stdscr.getch()
def run_popup(self, title, items):
return self.run_dialog(title, list(map(lambda x: {'type':'button','label':x}, items)), interval=1, y_pos = self.pos+3)
def network_dialog(self):
if not self.network:
return
net_params = self.network.get_parameters()
host, port, protocol = net_params.host, net_params.port, net_params.protocol
proxy_config, auto_connect = net_params.proxy, net_params.auto_connect
srv = 'auto-connect' if auto_connect else self.network.default_server
out = self.run_dialog('Network', [
{'label':'server', 'type':'str', 'value':srv},
{'label':'proxy', 'type':'str', 'value':self.config.get('proxy', '')},
], buttons = 1)
if out:
if out.get('server'):
server = out.get('server')
auto_connect = server == 'auto-connect'
if not auto_connect:
try:
host, port, protocol = deserialize_server(server)
except Exception:
self.show_message("Error:" + server + "\nIn doubt, type \"auto-connect\"")
return False
if out.get('server') or out.get('proxy'):
proxy = electrum.network.deserialize_proxy(out.get('proxy')) if out.get('proxy') else proxy_config
net_params = NetworkParameters(host, port, protocol, proxy, auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def settings_dialog(self):
fee = str(Decimal(self.config.fee_per_kb()) / COIN)
out = self.run_dialog('Settings', [
{'label':'Default fee', 'type':'satoshis', 'value': fee }
], buttons = 1)
if out:
if out.get('Default fee'):
fee = int(Decimal(out['Default fee']) * COIN)
self.config.set_key('fee_per_kb', fee, True)
def password_dialog(self):
out = self.run_dialog('Password', [
{'label':'Password', 'type':'password', 'value':''}
], buttons = 1)
return out.get('Password')
def run_dialog(self, title, items, interval=2, buttons=None, y_pos=3):
self.popup_pos = 0
self.w = curses.newwin( 5 + len(list(items))*interval + (2 if buttons else 0), 50, y_pos, 5)
w = self.w
out = {}
while True:
w.clear()
w.border(0)
w.addstr( 0, 2, title)
num = len(list(items))
numpos = num
if buttons: numpos += 2
for i in range(num):
item = items[i]
label = item.get('label')
if item.get('type') == 'list':
value = item.get('value','')
elif item.get('type') == 'satoshis':
value = item.get('value','')
elif item.get('type') == 'str':
value = item.get('value','')
elif item.get('type') == 'password':
value = '*'*len(item.get('value',''))
else:
value = ''
if value is None:
value = ''
if len(value)<20:
value += ' '*(20-len(value))
if 'value' in item:
w.addstr( 2+interval*i, 2, label)
w.addstr( 2+interval*i, 15, value, curses.A_REVERSE if self.popup_pos%numpos==i else curses.color_pair(1) )
else:
w.addstr( 2+interval*i, 2, label, curses.A_REVERSE if self.popup_pos%numpos==i else 0)
if buttons:
w.addstr( 5+interval*i, 10, "[ ok ]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-2) else curses.color_pair(2))
w.addstr( 5+interval*i, 25, "[cancel]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-1) else curses.color_pair(2))
w.refresh()
c = self.stdscr.getch()
if c in [ord('q'), 27]: break
elif c in [curses.KEY_LEFT, curses.KEY_UP]: self.popup_pos -= 1
elif c in [curses.KEY_RIGHT, curses.KEY_DOWN]: self.popup_pos +=1
else:
i = self.popup_pos%numpos
if buttons and c==10:
if i == numpos-2:
return out
elif i == numpos -1:
return {}
item = items[i]
_type = item.get('type')
if _type == 'str':
item['value'] = self.edit_str(item['value'], c)
out[item.get('label')] = item.get('value')
elif _type == 'password':
item['value'] = self.edit_str(item['value'], c)
out[item.get('label')] = item ['value']
elif _type == 'satoshis':
item['value'] = self.edit_str(item['value'], c, True)
out[item.get('label')] = item.get('value')
elif _type == 'list':
choices = item.get('choices')
try:
j = choices.index(item.get('value'))
except Exception:
j = 0
new_choice = choices[(j + 1)% len(choices)]
item['value'] = new_choice
out[item.get('label')] = item.get('value')
elif _type == 'button':
out['button'] = item.get('label')
break
return out
|
the-stack_0_15106 | from flask import Flask, render_template, request
import sqlite3 as sql
app = Flask(__name__)
import sqlite3
conn = sqlite3.connect('database.db')
# print("Opened database successfully")
# conn.execute('CREATE TABLE students (name TEXT, addr TEXT, city TEXT, pin TEXT)')
# print("Table created successfully")
# conn.close()
@app.route('/')
def home():
return render_template('index.html')
@app.route('/enternew')
def new_student():
return render_template('student.html')
@app.route('/addrec',methods = ['POST', 'GET'])
def addrec():
if request.method == 'POST':
try:
nm = request.form['nm']
addr = request.form['add']
city = request.form['city']
pin = request.form['pin']
with sql.connect("database.db") as con:
cur = con.cursor()
#cur.execute("INSERT INTO students (name,addr,city,pin) VALUES (?,?,?,?)",(nm,addr,city,pin) )
con.commit()
# msg = "Record successfully added"
except:
con.rollback()
msg = "error in insert operation"
finally:
return render_template("result.html",msg = msg)
con.close()
@app.route('/list')
def list():
con = sql.connect("database.db")
con.row_factory = sql.Row
cur = con.cursor()
cur.execute("select * from all_month")
rows = cur.fetchall();
return render_template("list.html",rows = rows)
if __name__ == '__main__':
app.run(debug = True) |
the-stack_0_15107 | """Module/script to byte-compile all .py files to .pyc (or .pyo) files.
When called as a script with arguments, this compiles the directories
given as arguments recursively; the -l option prevents it from
recursing into directories.
Without arguments, it compiles all modules on sys.path, without
recursing into subdirectories. (Even though it should do so for
packages -- for now, you'll have to deal with packages separately.)
See module py_compile for details of the actual byte-compilation.
"""
import os
import sys
import importlib.util
import py_compile
import struct
__all__ = ["compile_dir","compile_file","compile_path"]
def compile_dir(dir, maxlevels=10, ddir=None, force=False, rx=None,
quiet=False, legacy=False, optimize=-1):
"""Byte-compile all modules in the given directory tree.
Arguments (only dir is required):
dir: the directory to byte-compile
maxlevels: maximum recursion level (default 10)
ddir: the directory that will be prepended to the path to the
file as it is compiled into each byte-code file.
force: if True, force compilation, even if timestamps are up-to-date
quiet: if True, be quiet during compilation
legacy: if True, produce legacy pyc paths instead of PEP 3147 paths
optimize: optimization level or -1 for level of the interpreter
"""
if not quiet:
print('Listing {!r}...'.format(dir))
try:
names = os.listdir(dir)
except OSError:
print("Can't list {!r}".format(dir))
names = []
names.sort()
success = 1
for name in names:
if name == '__pycache__':
continue
fullname = os.path.join(dir, name)
if ddir is not None:
dfile = os.path.join(ddir, name)
else:
dfile = None
if not os.path.isdir(fullname):
if not compile_file(fullname, ddir, force, rx, quiet,
legacy, optimize):
success = 0
elif (maxlevels > 0 and name != os.curdir and name != os.pardir and
os.path.isdir(fullname) and not os.path.islink(fullname)):
if not compile_dir(fullname, maxlevels - 1, dfile, force, rx,
quiet, legacy, optimize):
success = 0
return success
def compile_file(fullname, ddir=None, force=False, rx=None, quiet=False,
legacy=False, optimize=-1):
"""Byte-compile one file.
Arguments (only fullname is required):
fullname: the file to byte-compile
ddir: if given, the directory name compiled in to the
byte-code file.
force: if True, force compilation, even if timestamps are up-to-date
quiet: if True, be quiet during compilation
legacy: if True, produce legacy pyc paths instead of PEP 3147 paths
optimize: optimization level or -1 for level of the interpreter
"""
success = 1
name = os.path.basename(fullname)
if ddir is not None:
dfile = os.path.join(ddir, name)
else:
dfile = None
if rx is not None:
mo = rx.search(fullname)
if mo:
return success
if os.path.isfile(fullname):
if legacy:
cfile = fullname + ('c' if __debug__ else 'o')
else:
if optimize >= 0:
cfile = importlib.util.cache_from_source(
fullname, debug_override=not optimize)
else:
cfile = importlib.util.cache_from_source(fullname)
cache_dir = os.path.dirname(cfile)
head, tail = name[:-3], name[-3:]
if tail == '.py':
if not force:
try:
mtime = int(os.stat(fullname).st_mtime)
expect = struct.pack('<4sl', importlib.util.MAGIC_NUMBER,
mtime)
with open(cfile, 'rb') as chandle:
actual = chandle.read(8)
if expect == actual:
return success
except OSError:
pass
if not quiet:
print('Compiling {!r}...'.format(fullname))
try:
ok = py_compile.compile(fullname, cfile, dfile, True,
optimize=optimize)
except py_compile.PyCompileError as err:
if quiet:
print('*** Error compiling {!r}...'.format(fullname))
else:
print('*** ', end='')
# escape non-printable characters in msg
msg = err.msg.encode(sys.stdout.encoding,
errors='backslashreplace')
msg = msg.decode(sys.stdout.encoding)
print(msg)
success = 0
except (SyntaxError, UnicodeError, OSError) as e:
if quiet:
print('*** Error compiling {!r}...'.format(fullname))
else:
print('*** ', end='')
print(e.__class__.__name__ + ':', e)
success = 0
else:
if ok == 0:
success = 0
return success
def compile_path(skip_curdir=1, maxlevels=0, force=False, quiet=False,
legacy=False, optimize=-1):
"""Byte-compile all module on sys.path.
Arguments (all optional):
skip_curdir: if true, skip current directory (default True)
maxlevels: max recursion level (default 0)
force: as for compile_dir() (default False)
quiet: as for compile_dir() (default False)
legacy: as for compile_dir() (default False)
optimize: as for compile_dir() (default -1)
"""
success = 1
for dir in sys.path:
if (not dir or dir == os.curdir) and skip_curdir:
print('Skipping current directory')
else:
success = success and compile_dir(dir, maxlevels, None,
force, quiet=quiet,
legacy=legacy, optimize=optimize)
return success
def main():
"""Script main program."""
import argparse
parser = argparse.ArgumentParser(
description='Utilities to support installing Python libraries.')
parser.add_argument('-l', action='store_const', const=0,
default=10, dest='maxlevels',
help="don't recurse into subdirectories")
parser.add_argument('-f', action='store_true', dest='force',
help='force rebuild even if timestamps are up to date')
parser.add_argument('-q', action='store_true', dest='quiet',
help='output only error messages')
parser.add_argument('-b', action='store_true', dest='legacy',
help='use legacy (pre-PEP3147) compiled file locations')
parser.add_argument('-d', metavar='DESTDIR', dest='ddir', default=None,
help=('directory to prepend to file paths for use in '
'compile-time tracebacks and in runtime '
'tracebacks in cases where the source file is '
'unavailable'))
parser.add_argument('-x', metavar='REGEXP', dest='rx', default=None,
help=('skip files matching the regular expression; '
'the regexp is searched for in the full path '
'of each file considered for compilation'))
parser.add_argument('-i', metavar='FILE', dest='flist',
help=('add all the files and directories listed in '
'FILE to the list considered for compilation; '
'if "-", names are read from stdin'))
parser.add_argument('compile_dest', metavar='FILE|DIR', nargs='*',
help=('zero or more file and directory names '
'to compile; if no arguments given, defaults '
'to the equivalent of -l sys.path'))
args = parser.parse_args()
compile_dests = args.compile_dest
if args.rx:
import re
args.rx = re.compile(args.rx)
# if flist is provided then load it
if args.flist:
try:
with (sys.stdin if args.flist=='-' else open(args.flist)) as f:
for line in f:
compile_dests.append(line.strip())
except OSError:
print("Error reading file list {}".format(args.flist))
return False
success = True
try:
if compile_dests:
for dest in compile_dests:
if os.path.isfile(dest):
if not compile_file(dest, args.ddir, args.force, args.rx,
args.quiet, args.legacy):
success = False
else:
if not compile_dir(dest, args.maxlevels, args.ddir,
args.force, args.rx, args.quiet,
args.legacy):
success = False
return success
else:
return compile_path(legacy=args.legacy, force=args.force,
quiet=args.quiet)
except KeyboardInterrupt:
print("\n[interrupted]")
return False
return True
if __name__ == '__main__':
exit_status = int(not main())
sys.exit(exit_status)
|
the-stack_0_15109 | ##########################################################################################################################################
## License: Apache 2.0. See LICENSE file in root directory. ##
##########################################################################################################################################
import pyrealsense2 as rs
import cv2
import numpy as np
import time
from dynamo.realsense_device_manager import DeviceManager
import dynamo.calibration as calibration
import dynamo.stream as stream
import copy
import threading
import sys
import multiprocessing
import pickle
import queue
import os
import argparse
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--load", help="load calibration",
nargs='?')
parser.add_argument("--new", help="new calibration",
nargs='?',default="new.cal")
parser.add_argument("--folder", help="data folder",
nargs = '?', default="data")
parser.add_argument("--time", help="time to collect data (s)",
nargs = '?', default="10")
args = parser.parse_args()
rsConfig = rs.config()
if args.load:
print(os.path.join(os.getcwd(),args.load))
file = open(os.path.join(os.getcwd(),args.load),'rb')
transformation = pickle.load(file)
file.close()
#transformation = calibration.load(os.path.join(os.getcwd(),args.load))
print(transformation)
deviceManager = DeviceManager(rs.context(), rsConfig)
deviceManager.enable_all_emitters()
elif args.new:
resolutionWidth = 848
resolutionHeight = 480
frameRate = 30
rsConfig.enable_stream(rs.stream.depth, resolutionWidth, resolutionHeight, rs.format.z16, frameRate)
rsConfig.enable_stream(rs.stream.color, resolutionWidth, resolutionHeight, rs.format.bgr8, frameRate)
deviceManager = DeviceManager(rs.context(), rsConfig)
deviceManager.enable_all_emitters()
deviceManager.load_settings_json('calibrationSettings.json')
cameraOrder = [
'822512060522',
'823112060874',
'823112060112',
'822512060553',
'822512060853',
'822512061105']
transformation = calibration.newIterative(args.new,deviceManager, cameraOrder, 4,5,0.0762)
deviceManager.disable_all_devices()
rsConfig.disable_stream(rs.stream.depth)
rsConfig.disable_stream(rs.stream.color)
resolutionWidth = 848
resolutionHeight = 480
frameRate = 90
rsConfig.enable_stream(rs.stream.depth, resolutionWidth, resolutionHeight, rs.format.z16, frameRate)
rsConfig.enable_stream(rs.stream.infrared, 1, resolutionWidth, resolutionHeight, rs.format.y8, frameRate)
deviceManager.load_settings_json('markerSettings.json')
deviceManager.enable_all_devices()
input("Calibration complete, press Enter to continue...")
script_path = os.path.abspath(__file__)
scriptDir = os.path.split(script_path)[0]
if not os.path.isdir(os.path.join(os.getcwd(), args.folder)):
os.mkdir(args.folder) #make base folder if it doesn't exist already
iteration = 1
while True:
loc = args.folder+'\\'+str(format(iteration, '02d'))
saveDirectory = os.path.join(os.getcwd(), loc)
if not os.path.isdir(saveDirectory):
os.mkdir(args.folder+'\\'+str(format(iteration, '02d'))) #make iteration folder if it doesn't exist already
data = stream.start(deviceManager, transformation, saveDirectory,args.time)
input("Data Collection complete, press Enter to continue...")
iteration+=1
|
the-stack_0_15110 | #!/usr/bin/env python3
import binascii
import os
import struct
import time
from collections import namedtuple
import numpy as np
from opendbc import DBC_PATH
from common.realtime import Ratekeeper
from selfdrive.config import Conversions as CV
import selfdrive.messaging as messaging
from selfdrive.services import service_list
from selfdrive.car import crc8_pedal
from selfdrive.car.honda.hondacan import fix
from selfdrive.car.honda.values import CAR
from selfdrive.car.honda.carstate import get_can_signals
from selfdrive.boardd.boardd import can_capnp_to_can_list, can_list_to_can_capnp
from selfdrive.can.plant_can_parser import CANParser
from selfdrive.car.honda.interface import CarInterface
from common.dbc import dbc
honda = dbc(os.path.join(DBC_PATH, "honda_civic_touring_2016_can_generated.dbc"))
# Trick: set 0x201 (interceptor) in fingerprints for gas is controlled like if there was an interceptor
CP = CarInterface.get_params(CAR.CIVIC, {0: {0x201: 6}, 1: {}, 2: {}, 3: {}})
def car_plant(pos, speed, grade, gas, brake):
# vehicle parameters
mass = 1700
aero_cd = 0.3
force_peak = mass*3.
force_brake_peak = -mass*10. #1g
power_peak = 100000 # 100kW
speed_base = power_peak/force_peak
rolling_res = 0.01
g = 9.81
frontal_area = 2.2
air_density = 1.225
gas_to_peak_linear_slope = 3.33
brake_to_peak_linear_slope = 0.3
creep_accel_v = [1., 0.]
creep_accel_bp = [0., 1.5]
#*** longitudinal model ***
# find speed where peak torque meets peak power
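  # (with the numbers above: speed_base = 100000 / (1700 * 3.) ~= 19.6 m/s, so the
  #  model is force-limited below ~19.6 m/s and power-limited above it)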
force_brake = brake * force_brake_peak * brake_to_peak_linear_slope
if speed < speed_base: # torque control
force_gas = gas * force_peak * gas_to_peak_linear_slope
else: # power control
force_gas = gas * power_peak / speed * gas_to_peak_linear_slope
force_grade = - grade * mass # positive grade means uphill
creep_accel = np.interp(speed, creep_accel_bp, creep_accel_v)
force_creep = creep_accel * mass
force_resistance = -(rolling_res * mass * g + 0.5 * speed**2 * aero_cd * air_density * frontal_area)
force = force_gas + force_brake + force_resistance + force_grade + force_creep
acceleration = force / mass
# TODO: lateral model
return speed, acceleration
def get_car_can_parser():
dbc_f = 'honda_civic_touring_2016_can_generated.dbc'
signals = [
("STEER_TORQUE", 0xe4, 0),
("STEER_TORQUE_REQUEST", 0xe4, 0),
("COMPUTER_BRAKE", 0x1fa, 0),
("COMPUTER_BRAKE_REQUEST", 0x1fa, 0),
("GAS_COMMAND", 0x200, 0),
]
checks = [
(0xe4, 100),
(0x1fa, 50),
(0x200, 50),
]
return CANParser(dbc_f, signals, checks)
def to_3_byte(x):
# Convert into 12 bit value
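  # e.g. to_3_byte(255) -> b'0ff' (three hex characters = 12 bits)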
s = struct.pack("!H", int(x))
return binascii.hexlify(s)[1:]
def to_3s_byte(x):
s = struct.pack("!h", int(x))
return binascii.hexlify(s)[1:]
class Plant():
messaging_initialized = False
def __init__(self, lead_relevancy=False, rate=100, speed=0.0, distance_lead=2.0):
self.rate = rate
if not Plant.messaging_initialized:
Plant.logcan = messaging.pub_sock(service_list['can'].port)
Plant.sendcan = messaging.sub_sock(service_list['sendcan'].port)
Plant.model = messaging.pub_sock(service_list['model'].port)
Plant.live_params = messaging.pub_sock(service_list['liveParameters'].port)
Plant.health = messaging.pub_sock(service_list['health'].port)
Plant.thermal = messaging.pub_sock(service_list['thermal'].port)
Plant.driverMonitoring = messaging.pub_sock(service_list['driverMonitoring'].port)
Plant.cal = messaging.pub_sock(service_list['liveCalibration'].port)
Plant.controls_state = messaging.sub_sock(service_list['controlsState'].port)
Plant.plan = messaging.sub_sock(service_list['plan'].port)
Plant.messaging_initialized = True
self.frame = 0
self.angle_steer = 0.
self.gear_choice = 0
self.speed, self.speed_prev = 0., 0.
self.esp_disabled = 0
self.main_on = 1
self.user_gas = 0
self.computer_brake,self.user_brake = 0,0
self.brake_pressed = 0
self.angle_steer_rate = 0
self.distance, self.distance_prev = 0., 0.
self.speed, self.speed_prev = speed, speed
self.steer_error, self.brake_error, self.steer_not_allowed = 0, 0, 0
self.gear_shifter = 8 # D gear
self.pedal_gas = 0
self.cruise_setting = 0
self.seatbelt, self.door_all_closed = True, True
self.steer_torque, self.v_cruise, self.acc_status = 0, 0, 0 # v_cruise is reported from can, not the one used for controls
self.lead_relevancy = lead_relevancy
# lead car
self.distance_lead, self.distance_lead_prev = distance_lead , distance_lead
self.rk = Ratekeeper(rate, print_delay_threshold=100)
self.ts = 1./rate
self.cp = get_car_can_parser()
self.response_seen = False
time.sleep(1)
messaging.drain_sock(Plant.sendcan)
messaging.drain_sock(Plant.controls_state)
def close(self):
Plant.logcan.close()
Plant.model.close()
Plant.live_params.close()
def speed_sensor(self, speed):
if speed<0.3:
return 0
else:
return speed * CV.MS_TO_KPH
def current_time(self):
return float(self.rk.frame) / self.rate
def step(self, v_lead=0.0, cruise_buttons=None, grade=0.0, publish_model = True):
gen_signals, gen_checks = get_can_signals(CP)
sgs = [s[0] for s in gen_signals]
msgs = [s[1] for s in gen_signals]
cks_msgs = set(check[0] for check in gen_checks)
cks_msgs.add(0x18F)
cks_msgs.add(0x30C)
# ******** get messages sent to the car ********
can_msgs = []
for a in messaging.drain_sock(Plant.sendcan, wait_for_one=self.response_seen):
can_msgs.extend(can_capnp_to_can_list(a.sendcan, [0,2]))
# After the first response the car is done fingerprinting, so we can run in lockstep with controlsd
if can_msgs:
self.response_seen = True
self.cp.update_can(can_msgs)
# ******** get controlsState messages for plotting ***
controls_state_msgs = []
for a in messaging.drain_sock(Plant.controls_state, wait_for_one=self.response_seen):
controls_state_msgs.append(a.controlsState)
fcw = None
for a in messaging.drain_sock(Plant.plan):
if a.plan.fcw:
fcw = True
if self.cp.vl[0x1fa]['COMPUTER_BRAKE_REQUEST']:
brake = self.cp.vl[0x1fa]['COMPUTER_BRAKE'] * 0.003906248
else:
brake = 0.0
if self.cp.vl[0x200]['GAS_COMMAND'] > 0:
gas = self.cp.vl[0x200]['GAS_COMMAND'] / 256.0
else:
gas = 0.0
if self.cp.vl[0xe4]['STEER_TORQUE_REQUEST']:
steer_torque = self.cp.vl[0xe4]['STEER_TORQUE']*1.0/0xf00
else:
steer_torque = 0.0
distance_lead = self.distance_lead_prev + v_lead * self.ts
# ******** run the car ********
speed, acceleration = car_plant(self.distance_prev, self.speed_prev, grade, gas, brake)
distance = self.distance_prev + speed * self.ts
speed = self.speed_prev + self.ts * acceleration
if speed <= 0:
speed = 0
acceleration = 0
# ******** lateral ********
self.angle_steer -= (steer_torque/10.0) * self.ts
# *** radar model ***
if self.lead_relevancy:
d_rel = np.maximum(0., distance_lead - distance)
v_rel = v_lead - speed
else:
d_rel = 200.
v_rel = 0.
lateral_pos_rel = 0.
# print at 5hz
if (self.frame % (self.rate//5)) == 0:
print("%6.2f m %6.2f m/s %6.2f m/s2 %.2f ang gas: %.2f brake: %.2f steer: %5.2f lead_rel: %6.2f m %6.2f m/s" % (distance, speed, acceleration, self.angle_steer, gas, brake, steer_torque, d_rel, v_rel))
# ******** publish the car ********
vls_tuple = namedtuple('vls', [
'XMISSION_SPEED',
'WHEEL_SPEED_FL', 'WHEEL_SPEED_FR', 'WHEEL_SPEED_RL', 'WHEEL_SPEED_RR',
'STEER_ANGLE', 'STEER_ANGLE_RATE', 'STEER_TORQUE_SENSOR', 'STEER_TORQUE_MOTOR',
'LEFT_BLINKER', 'RIGHT_BLINKER',
'GEAR',
'WHEELS_MOVING',
'BRAKE_ERROR_1', 'BRAKE_ERROR_2',
'SEATBELT_DRIVER_LAMP', 'SEATBELT_DRIVER_LATCHED',
'BRAKE_PRESSED', 'BRAKE_SWITCH',
'CRUISE_BUTTONS',
'ESP_DISABLED',
'HUD_LEAD',
'USER_BRAKE',
'STEER_STATUS',
'GEAR_SHIFTER',
'PEDAL_GAS',
'CRUISE_SETTING',
'ACC_STATUS',
'CRUISE_SPEED_PCM',
'CRUISE_SPEED_OFFSET',
'DOOR_OPEN_FL', 'DOOR_OPEN_FR', 'DOOR_OPEN_RL', 'DOOR_OPEN_RR',
'CAR_GAS',
'MAIN_ON',
'EPB_STATE',
'BRAKE_HOLD_ACTIVE',
'INTERCEPTOR_GAS',
'INTERCEPTOR_GAS2',
'IMPERIAL_UNIT',
])
vls = vls_tuple(
self.speed_sensor(speed),
self.speed_sensor(speed), self.speed_sensor(speed), self.speed_sensor(speed), self.speed_sensor(speed),
self.angle_steer, self.angle_steer_rate, 0, 0,#Steer torque sensor
0, 0, # Blinkers
self.gear_choice,
speed != 0,
self.brake_error, self.brake_error,
not self.seatbelt, self.seatbelt, # Seatbelt
self.brake_pressed, 0., #Brake pressed, Brake switch
cruise_buttons,
self.esp_disabled,
0, # HUD lead
self.user_brake,
self.steer_error,
self.gear_shifter,
self.pedal_gas,
self.cruise_setting,
self.acc_status,
self.v_cruise,
0, # Cruise speed offset
0, 0, 0, 0, # Doors
self.user_gas,
self.main_on,
0, # EPB State
0, # Brake hold
0, # Interceptor feedback
0, # Interceptor 2 feedback
False
)
# TODO: publish each message at proper frequency
can_msgs = []
for msg in set(msgs):
msg_struct = {}
indxs = [i for i, x in enumerate(msgs) if msg == msgs[i]]
for i in indxs:
msg_struct[sgs[i]] = getattr(vls, sgs[i])
if "COUNTER" in honda.get_signals(msg):
msg_struct["COUNTER"] = self.frame % 4
if "COUNTER_PEDAL" in honda.get_signals(msg):
msg_struct["COUNTER_PEDAL"] = self.frame % 0xf
msg = honda.lookup_msg_id(msg)
msg_data = honda.encode(msg, msg_struct)
if "CHECKSUM" in honda.get_signals(msg):
msg_data = fix(msg_data, msg)
if "CHECKSUM_PEDAL" in honda.get_signals(msg):
msg_struct["CHECKSUM_PEDAL"] = crc8_pedal(msg_data[:-1])
msg_data = honda.encode(msg, msg_struct)
can_msgs.append([msg, 0, msg_data, 0])
# add the radar message
# TODO: use the DBC
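    # The 0x445 payload below is packed by hand instead: three 3-hex-digit
    # fields (d_rel*16, lateral_pos_rel*16 masked to 10 bits, signed v_rel*32)
    # plus 7 padding characters, i.e. 16 hex characters = 8 bytes.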
if self.frame % 5 == 0:
radar_state_msg = b'\x79\x00\x00\x00\x00\x00\x00\x00'
radar_msg = to_3_byte(d_rel*16.0) + \
to_3_byte(int(lateral_pos_rel*16.0)&0x3ff) + \
to_3s_byte(int(v_rel*32.0)) + \
b"0f00000"
radar_msg = binascii.unhexlify(radar_msg)
can_msgs.append([0x400, 0, radar_state_msg, 1])
can_msgs.append([0x445, 0, radar_msg, 1])
# add camera msg so controlsd thinks it's alive
msg_struct["COUNTER"] = self.frame % 4
msg = honda.lookup_msg_id(0xe4)
msg_data = honda.encode(msg, msg_struct)
msg_data = fix(msg_data, 0xe4)
can_msgs.append([0xe4, 0, msg_data, 2])
# Fake sockets that controlsd subscribes to
live_parameters = messaging.new_message()
live_parameters.init('liveParameters')
live_parameters.liveParameters.valid = True
live_parameters.liveParameters.sensorValid = True
live_parameters.liveParameters.posenetValid = True
live_parameters.liveParameters.steerRatio = CP.steerRatio
live_parameters.liveParameters.stiffnessFactor = 1.0
Plant.live_params.send(live_parameters.to_bytes())
driver_monitoring = messaging.new_message()
driver_monitoring.init('driverMonitoring')
driver_monitoring.driverMonitoring.faceOrientation = [0.] * 3
driver_monitoring.driverMonitoring.facePosition = [0.] * 2
Plant.driverMonitoring.send(driver_monitoring.to_bytes())
health = messaging.new_message()
health.init('health')
health.health.controlsAllowed = True
Plant.health.send(health.to_bytes())
thermal = messaging.new_message()
thermal.init('thermal')
thermal.thermal.freeSpace = 1.
thermal.thermal.batteryPercent = 100
Plant.thermal.send(thermal.to_bytes())
# ******** publish a fake model going straight and fake calibration ********
# note that this is worst case for MPC, since model will delay long mpc by one time step
if publish_model and self.frame % 5 == 0:
md = messaging.new_message()
cal = messaging.new_message()
md.init('model')
cal.init('liveCalibration')
md.model.frameId = 0
for x in [md.model.path, md.model.leftLane, md.model.rightLane]:
x.points = [0.0]*50
x.prob = 1.0
x.std = 1.0
if self.lead_relevancy:
d_rel = np.maximum(0., distance_lead - distance)
v_rel = v_lead - speed
prob = 1.0
else:
d_rel = 200.
v_rel = 0.
prob = 0.0
md.model.lead.dist = float(d_rel)
md.model.lead.prob = prob
md.model.lead.relY = 0.0
md.model.lead.relYStd = 1.
md.model.lead.relVel = float(v_rel)
md.model.lead.relVelStd = 1.
md.model.lead.relA = 0.0
md.model.lead.relAStd = 10.
md.model.lead.std = 1.0
cal.liveCalibration.calStatus = 1
cal.liveCalibration.calPerc = 100
cal.liveCalibration.rpyCalib = [0.] * 3
# fake values?
Plant.model.send(md.to_bytes())
Plant.cal.send(cal.to_bytes())
Plant.logcan.send(can_list_to_can_capnp(can_msgs))
# ******** update prevs ********
self.frame += 1
if self.response_seen:
self.rk.monitor_time()
self.speed = speed
self.distance = distance
self.distance_lead = distance_lead
self.speed_prev = speed
self.distance_prev = distance
self.distance_lead_prev = distance_lead
else:
# Don't advance time when controlsd is not yet ready
self.rk.keep_time()
self.rk._frame = 0
return {
"distance": distance,
"speed": speed,
"acceleration": acceleration,
"distance_lead": distance_lead,
"brake": brake,
"gas": gas,
"steer_torque": steer_torque,
"fcw": fcw,
"controls_state_msgs": controls_state_msgs,
}
# simple engage in standalone mode
def plant_thread(rate=100):
  plant = Plant(rate=rate)
while 1:
plant.step()
if __name__ == "__main__":
plant_thread()
|
the-stack_0_15114 | # coding=utf-8
import os
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
path = os.path.join(os.getcwd(), 'images', 'annotations')
if not os.path.exists(path):
    raise ValueError('The images/annotations directory does not exist')
else:
files = os.listdir(path)
results = {}
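    # results maps "number of annotated objects in an image" -> "number of images with that count"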
files = [file for file in files if file.endswith('.XML') or file.endswith('.xml')]
for file in files:
objectsDetected = 0
filePath = os.path.join(path, file)
tree = ET.parse(filePath)
root = tree.getroot()
for member in root.findall('object'):
label = member[0].text
if label != 'hoja' and label != 'dano':
objectsDetected = objectsDetected + 1
if objectsDetected in results:
results[objectsDetected] = results[objectsDetected] + 1
else:
results[objectsDetected] = 1
print("Cantidad de objetos, Cantidad de imagenes")
for key, value in results.items():
print("{0},{1}".format(key, value))
plt.bar(list(results.keys()), results.values(), color='g', width=0.9)
    plt.ylabel('Number of images')
    plt.xlabel('Number of annotated objects (excluding leaves and damage)')
plt.show() |
the-stack_0_15119 | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
from decimal import Decimal
import math
import random
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
CTxWitness,
MSG_WITNESS_FLAG,
NODE_NETWORK,
NODE_WITNESS,
msg_no_witness_block,
msg_getdata,
msg_headers,
msg_inv,
msg_tx,
msg_block,
msg_no_witness_tx,
ser_uint256,
ser_vector,
sha256,
uint256_from_str,
FromHex,
)
from test_framework.mininode import (
P2PInterface,
mininode_lock,
)
from test_framework.script import (
CScript,
CScriptNum,
CScriptOp,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_16,
OP_2DROP,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_ANYONECANPAY,
SIGHASH_NONE,
SIGHASH_SINGLE,
SegwitV0SignatureHash,
LegacySignatureHash,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
disconnect_nodes,
softfork_active,
hex_str_to_bytes,
assert_raises_rpc_error,
bytes_to_hex_str,
)
from test_framework.eqpayconfig import *
from test_framework.eqpay import generatesynchronized
from test_framework.messages import COIN
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000 // FACTOR_REDUCED_BLOCK_TIME
SEGWIT_HEIGHT = 2020 if ENABLE_REDUCED_BLOCK_TIME else 520
class UTXO():
"""Used to keep track of anyone-can-spend outputs that we can use in the tests."""
def __init__(self, sha256, n, value):
self.sha256 = sha256
self.n = n
self.nValue = value
def get_p2pkh_script(pubkeyhash):
"""Get the script associated with a P2PKH."""
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
"""Add signature for a P2PK witness program."""
tx_hash = SegwitV0SignatureHash(script, tx_to, in_idx, hashtype, value)
signature = key.sign_ecdsa(tx_hash) + chr(hashtype).encode('latin-1')
tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script]
tx_to.rehash()
def get_virtual_size(witness_block):
"""Calculate the virtual size of a witness block.
Virtual size is base + witness/4."""
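    # Worked example (illustrative numbers): base_size=1000, total_size=1401
    # gives weight 3*1000 + 1401 = 4401, so vsize = ceil(4401/4) = 1101;
    # the "+3" below implements that ceiling.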
base_size = len(witness_block.serialize(with_witness=False))
total_size = len(witness_block.serialize())
# the "+3" is so we round up
vsize = int((3 * base_size + total_size + 3) / 4)
return vsize
def submit_old_blocks(node, n):
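    # Mines n simple blocks on top of the current tip, each paying the fixed key
    # imported below; used later as submit_old_blocks(node, COINBASE_MATURITY - 1)
    # to mature a coinbase output before spending it.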
node.importprivkey("cRComRro8wTGnDTGqgpyP5vwwo24Tn831cPu3PZEdr2532JVPjrZ")
pubkey = "03716d5678c829d09cdfdb4bec058712de3ecd99968a4a064336ffb592342e21f9"
num_blocks_old = node.getblockcount()
for i in range(0, n):
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.vtx[0].vout[0].scriptPubKey = CScript([hex_str_to_bytes(pubkey), OP_CHECKSIG])
block.vtx[0].rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize()))
assert_equal(node.getblockcount(), num_blocks_old+n)
def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):
"""Send a transaction to the node and check that it's accepted to the mempool
- Submit the transaction over the p2p interface
- use the getrawmempool rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_and_ping(msg_tx(tx) if with_witness else msg_no_witness_tx(tx))
assert_equal(tx.hash in node.getrawmempool(), accepted)
def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None):
"""Send a block to the node and check that it's accepted
- Submit the block over the p2p interface
- use the getbestblockhash rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_and_ping(msg_block(block) if with_witness else msg_no_witness_block(block))
assert_equal(node.getbestblockhash() == block.hash, accepted)
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.getdataset = set()
# Avoid sending out msg_getdata in the mininode thread as a reply to invs.
# They are not needed and would only lead to races because we send msg_getdata out in the test thread
def on_inv(self, message):
pass
def on_getdata(self, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True):
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
if success:
self.wait_for_getdata(timeout)
else:
time.sleep(timeout)
assert not self.last_message.get("getdata")
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [CBlockHeader(block)]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [
["-acceptnonstdtxn=1", "-segwitheight={}".format(SEGWIT_HEIGHT), "[email protected]"],
["-acceptnonstdtxn=0", "-segwitheight={}".format(SEGWIT_HEIGHT)],
["-acceptnonstdtxn=1", "-segwitheight=-1"],
]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
# Helper functions
def build_next_block(self, version=4):
"""Build a block on top of node0's tip."""
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = version
block.rehash()
return block
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
"""Add list of transactions to block, adds witness commitment, then solves."""
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
def run_test(self):
# Setup the p2p connections
# self.test_node sets NODE_WITNESS|NODE_NETWORK
self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
# self.old_node sets only NODE_NETWORK
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
# self.std_node is for testing node1 (fRequireStandard=true)
self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
assert self.test_node.nServices & NODE_WITNESS != 0
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
self.log.info("Starting tests before segwit activation")
self.segwit_active = False
self.test_non_witness_transaction()
self.test_v0_outputs_arent_spendable()
self.test_block_relay()
self.test_getblocktemplate_before_lockin()
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_standardness_v0()
self.log.info("Advancing to segwit activation")
self.advance_to_segwit_active()
# Segwit status 'active'
self.test_p2sh_witness()
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay()
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0()
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness_blinding()
self.test_non_standard_witness()
self.test_upgrade_after_activation()
self.test_witness_sigops()
self.test_superfluous_witness()
# Individual tests
def subtest(func): # noqa: N805
"""Wraps the subtests for logging and state assertions."""
def func_wrapper(self, *args, **kwargs):
self.log.info("Subtest: {} (Segwit active = {})".format(func.__name__, self.segwit_active))
# Assert segwit status is as expected
assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)
func(self, *args, **kwargs)
# Each subtest should leave some utxos for the next subtest
assert self.utxo
self.sync_blocks()
# Assert segwit status is as expected at end of subtest
assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)
return func_wrapper
@subtest
def test_non_witness_transaction(self):
"""See if sending a regular transaction works, and create a utxo to use in later tests."""
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
block = self.build_next_block(version=4)
block.solve()
self.test_node.send_and_ping(msg_no_witness_block(block)) # make sure the block was processed
txid = block.vtx[0].sha256
submit_old_blocks(self.nodes[0], COINBASE_MATURITY - 1) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(int((INITIAL_BLOCK_REWARD-Decimal('0.01')) * COIN), CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_no_witness_tx(tx).serialize(), msg_tx(tx).serialize())
self.test_node.send_and_ping(msg_tx(tx)) # make sure the block was processed
assert tx.hash in self.nodes[0].getrawmempool()
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, (INITIAL_BLOCK_REWARD-1) * 100000000))
self.nodes[0].generate(1)
self.segwit_status = 'started' # we will advance to started immediately...
@subtest
def test_unnecessary_witness_before_segwit_activation(self):
"""Verify that blocks with witnesses are rejected before activation."""
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert tx.sha256 != tx.calc_sha256(with_witness=True)
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='unexpected-witness')
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_and_ping(msg_no_witness_block(block)) # make sure the block was processed
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
@subtest
def test_block_relay(self):
"""Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG.
This is true regardless of segwit activation.
Also test that we don't ask for blocks from unupgraded peers."""
blocktype = 2 | MSG_WITNESS_FLAG
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block1, True)
block2 = self.build_next_block(version=4)
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block2, True)
block3 = self.build_next_block(version=(VB_TOP_BITS | (1 << 15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if not self.segwit_active:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height + 1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)
assert_equal(block.serialize(), wit_block.serialize())
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert len(block.vtx[0].wit.vtxinwit) == 1
assert len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(), block.serialize())
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize()))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3 * len(block.serialize(False)) + len(block.serialize())
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded
block4 = self.build_next_block(version=4)
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [CBlockHeader(block4)]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert block4.sha256 not in self.old_node.getdataset
@subtest
def test_v0_outputs_arent_spendable(self):
"""Test that v0 outputs aren't spendable before segwit activation.
~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was
backdated so that it applies to all blocks, going back to the genesis
block.
Consequently, version 0 witness outputs are never spendable without
witness, and so can't be spent before segwit activation (the point at which
blocks are permitted to contain witnesses)."""
# node2 doesn't need to be connected for this test.
# (If it's connected, node0 may propagate an invalid block to it over
# compact blocks and the nodes would have inconsistent tips.)
disconnect_nodes(self.nodes[0], 2)
# Create two outputs, a p2wsh and p2sh-p2wsh
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(script_pubkey)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
value = self.utxo[0].nValue // 3
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')]
tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)]
tx.vout.append(CTxOut(value, CScript([OP_TRUE])))
tx.rehash()
txid = tx.sha256
# Add it to a block
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# Verify that segwit isn't activated. A block serialized with witness
# should be rejected prior to activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# Now send the block without witness. It should be accepted
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
# Now try to spend the outputs. This should fail since SCRIPT_VERIFY_WITNESS is always enabled.
p2wsh_tx = CTransaction()
p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')]
p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2wsh_tx.rehash()
p2sh_p2wsh_tx = CTransaction()
p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))]
p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2sh_p2wsh_tx.rehash()
for tx in [p2wsh_tx, p2sh_p2wsh_tx]:
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# When the block is serialized with a witness, the block will be rejected because witness
# data isn't allowed in blocks that don't commit to witness data.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# When the block is serialized without witness, validation fails because the transaction is
# invalid (transactions are always validated with SCRIPT_VERIFY_WITNESS so a segwit v0 transaction
# without a witness is invalid).
# Note: The reject reason for this failure could be
# 'block-validation-failed' (if script check threads > 1) or
# 'non-mandatory-script-verify-flag (Witness program was passed an
# empty witness)' (otherwise).
# TODO: support multiple acceptable reject reasons.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=False)
connect_nodes(self.nodes[0], 2)
self.utxo.pop(0)
self.utxo.append(UTXO(txid, 2, value))
@subtest
def test_getblocktemplate_before_lockin(self):
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules": ["segwit"]})
if node == self.nodes[2]:
# If this is a non-segwit node, we should not get a witness
# commitment.
assert 'default_witness_commitment' not in gbt_results
else:
# For segwit-aware nodes, check the witness
# commitment is correct.
assert 'default_witness_commitment' in gbt_results
witness_commitment = gbt_results['default_witness_commitment']
# Check that default_witness_commitment is present.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, script.hex())
# Clear out the mempool
self.nodes[0].generate(1)
self.sync_blocks()
@subtest
def test_witness_tx_relay_before_segwit_activation(self):
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert self.old_node.last_message["getdata"].inv[0].type == 1
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2, success=False)
# Delivering this transaction with witness should fail (no matter who
# its from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
@subtest
def test_standardness_v0(self):
"""Test V0 txout standardness.
V0 segwit outputs and inputs are always standard.
V0 segwit inputs may only be mined after activation, but not before."""
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 100000, p2sh_script_pubkey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 1000000, script_pubkey)]
tx.vout.append(CTxOut(800000, script_pubkey)) # Might burn this later
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER # Just to have the option to bump this tx from the mempool
tx.rehash()
# This is always accepted, since the mempool policy is to consider segwit as always active
# and thus allow segwit outputs
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True)
# Now create something that looks like a P2PKH output. This won't be spendable.
script_pubkey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
# tx was accepted, so we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(700000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
# Now update self.utxo for later tests.
tx3 = CTransaction()
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
self.sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
if not self.segwit_active:
# Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
# in blocks and the tx is impossible to mine right now.
assert_equal(self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]), [{'txid': tx3.hash, 'allowed': True}])
# Create the same output as tx3, but by replacing tx
tx3_out = tx3.vout[0]
tx3 = tx
tx3.vout = [tx3_out]
tx3.rehash()
assert_equal(self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]), [{'txid': tx3.hash, 'allowed': True}])
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
@subtest
def advance_to_segwit_active(self):
"""Mine enough blocks to activate segwit."""
assert not softfork_active(self.nodes[0], 'segwit')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(SEGWIT_HEIGHT - height - 2)
assert not softfork_active(self.nodes[0], 'segwit')
self.nodes[0].generate(1)
assert softfork_active(self.nodes[0], 'segwit')
self.segwit_active = True
@subtest
def test_p2sh_witness(self):
"""Test P2SH wrapped witness programs."""
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
script_pubkey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, script_pubkey))
tx.rehash()
# Verify mempool acceptance and block validity
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True)
self.sync_blocks()
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 100000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older bitcoind's that are not
# segwit-aware would also reject this for failing CLEANSTACK.
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: non-mandatory-script-verify-flag (Witness program was passed an empty witness)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = script_sig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program]
# Verify mempool acceptance
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're after activation, then sending this with witnesses should be valid.
# This no longer works before activation, because SCRIPT_VERIFY_WITNESS
# is always set.
# TODO: rewrite this test to make clear that it only works after activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
@subtest
def test_witness_commitments(self):
"""Test witness commitments.
This test can only be run after segwit has activated."""
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert msg_block(block).serialize() != msg_no_witness_block(block).serialize()
# This empty block should be valid.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1]
# This should also be valid.
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True)
# Now test commitments with actual transactions
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, script_pubkey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 100000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert len(block_3.vtx[0].vout) == 4 # 3 OP_returns
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue - 100000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_block_malleability(self):
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 10000000)
assert get_virtual_size(block) > MAX_BLOCK_BASE_SIZE
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(block.serialize().hex())
assert self.nodes[0].getbestblockhash() != block.hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert get_virtual_size(block) < MAX_BLOCK_BASE_SIZE
self.nodes[0].submitblock(block.serialize().hex())
assert self.nodes[0].getbestblockhash() == block.hash
# Now make sure that malleating the witness reserved value doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Changing the witness reserved value doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
@subtest
def test_witness_block_size(self):
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert len(self.utxo) > 0
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 100 // FACTOR_REDUCED_BLOCK_TIME
witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
script_pubkey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value / NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, script_pubkey))
parent_tx.vout[0].nValue -= 50000
assert parent_tx.vout[0].nValue > 0
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes + 1, 55)
block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
additional_bytes -= extra_bytes
i += 1
update_vtixinwit_index = int(i/(2*NUM_DROPS))
update_stack_index = i%(2*NUM_DROPS)
update_base_length = len(block.vtx[-1].wit.vtxinwit[update_vtixinwit_index].scriptWitness.stack[update_stack_index])
block.vtx[-1].wit.vtxinwit[update_vtixinwit_index].scriptWitness.stack[update_stack_index] += b'a'*4
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert len(block.serialize()) > (2 * 1024 * 1024) // FACTOR_REDUCED_BLOCK_TIME
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now resize the second transaction to make the block fit.
#cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
#block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
#block.vtx[0].vout.pop()
block.vtx[-1].wit.vtxinwit[update_vtixinwit_index].scriptWitness.stack[update_stack_index] = b'a'*4 if ENABLE_REDUCED_BLOCK_TIME else b'a'*8
add_witness_commitment(block)
block.solve()
print(get_virtual_size(block), MAX_BLOCK_BASE_SIZE)
assert get_virtual_size(block) == MAX_BLOCK_BASE_SIZE
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
@subtest
def test_submit_block(self):
"""Test that submitblock adds the nonce automatically when possible."""
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This shouldn't possibly work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(block.serialize().hex())
assert self.nodes[0].getbestblockhash() != block.hash
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(block.serialize().hex())
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(block_2.serialize().hex())
# Tip should not advance!
assert self.nodes[0].getbestblockhash() != block_2.hash
@subtest
def test_extra_witness_data(self):
"""Test extra witness data in a transaction."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program]
tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_push_length(self):
"""Test that witness stack can only allow up to 520 byte pushes."""
MAX_SCRIPT_ELEMENT_SIZE = 128000
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 100000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_program_length(self):
"""Test that witness outputs greater than 10kB can't be spent."""
MAX_SCRIPT_ELEMENT_SIZE = 128000
MAX_PROGRAM_LENGTH = 129000
        # This program is one (MAX_SCRIPT_ELEMENT_SIZE - 50)-byte push and one 996-byte push, then 46 OP_DROPs and OP_TRUE: MAX_PROGRAM_LENGTH + 1 bytes in total (one byte too long).
long_witness_program = CScript([b'a' * (MAX_SCRIPT_ELEMENT_SIZE-50)] + [b'a'*996] + [OP_DROP]*46 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH + 1)
long_witness_hash = sha256(long_witness_program)
long_script_pubkey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, long_script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 100000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a' * (MAX_SCRIPT_ELEMENT_SIZE-50)] + [b'a'*996] + [OP_DROP]*45 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_witness_input_length(self):
"""Test that vin length must match vtxinwit length."""
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
value = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(value / 10), script_pubkey))
tx.vout[0].nValue -= 1000
assert tx.vout[0].nValue >= 0
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_tx_relay_after_segwit_activation(self):
"""Test transaction relay after segwit activation.
After segwit activates, verify that mempool:
- rejects transactions with unnecessary/extra witnesses
- accepts transactions with valid witnesses
and that witness transactions are relayed to non-upgraded peers."""
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 100000, script_pubkey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add too-large for IsStandard witness and check that it does not enter reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a' * 400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 100000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program]
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness())
vsize = math.ceil(weight / 4)
assert_equal(raw_tx["vsize"], vsize)
assert_equal(raw_tx["weight"], weight)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], witness_program.hex())
assert vsize != raw_tx["size"]
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_segwit_versions(self):
"""Test validity of future segwit version transactions.
Future segwit versions are non-standard to spend, but valid in blocks.
Sending to future segwit versions is always allowed.
Can run this before and after segwit activation."""
NUM_SEGWIT_VERSIONS = 17 # will test OP_0, OP_1, ..., OP_16
if len(self.utxo) < NUM_SEGWIT_VERSIONS:
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 400000) // NUM_SEGWIT_VERSIONS
for i in range(NUM_SEGWIT_VERSIONS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_SEGWIT_VERSIONS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
self.sync_blocks()
temp_utxo = []
tx = CTransaction()
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
# First try to spend to a future version segwit script_pubkey.
script_pubkey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue - 100000, script_pubkey)]
tx.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
self.sync_blocks()
assert len(self.nodes[0].getrawmempool()) == 0
# Finally, verify that version 0 -> version 1 transactions
# are standard
script_pubkey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue - 100000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
# Gets accepted to both policy-enforcing nodes and others.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo into a segwit v1 output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 1000000, script_pubkey))
tx3.rehash()
# First we test this transaction against fRequireStandard=true node
# making sure the txid is added to the reject filter
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, with_witness=True, accepted=False, reason="bad-txns-nonstandard-inputs")
# Now the node will no longer ask for getdata of this transaction when advertised by same txid
self.std_node.announce_tx_and_wait_for_getdata(tx3, timeout=5, success=False)
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades")
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.sync_blocks()
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_premature_coinbase_witness_spend(self):
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = script_pubkey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
spend_tx.rehash()
# Now test a premature spend.
generatesynchronized(self.nodes[0], COINBASE_MATURITY-2, None, self.nodes)
self.sync_blocks()
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=True)
self.sync_blocks()
@subtest
def test_uncompressed_pubkey(self):
"""Test uncompressed pubkey validity in segwit transactions.
Uncompressed pubkeys are no longer supported in default relay policy,
but (for now) are still valid in blocks."""
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = ECKey()
key.generate(False)
pubkey = key.get_pubkey().get_bytes()
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue - 100000, script_pkh))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_wsh = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 100000, script_wsh))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitV0SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.rehash()
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(script_wsh)
script_p2sh = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([script_wsh])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 100000, script_p2sh))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
script_pubkey = get_p2pkh_script(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue - 100000, script_pubkey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue - 100000, CScript([OP_TRUE])))
(sig_hash, err) = LegacySignatureHash(script_pubkey, tx5, 0, SIGHASH_ALL)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
@subtest
def test_signature_version_1(self):
key = ECKey()
key.generate()
pubkey = key.get_pubkey().get_bytes()
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.sync_blocks()
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [0, SIGHASH_ANYONECANPAY]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 100000, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key)
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Too-small input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try correct value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_SIGHASH_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_SIGHASH_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
for i in range(NUM_SIGHASH_TESTS):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_SIGHASH_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_SIGHASH_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert len(temp_utxos) > num_inputs
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitV0SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests by creating two outputs
# that consolidate all the coins in temp_utxos.
output_value = sum(i.nValue for i in temp_utxos) // 2
tx = CTransaction()
index = 0
# Just spend to our usual anyone-can-spend output
tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
for i in temp_utxos:
# Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
@subtest
def test_non_standard_witness_blinding(self):
"""Test behavior of unnecessary witnesses in transactions does not blind the node for the transaction"""
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, e.g. by violating standardness checks.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True)
self.nodes[0].generate(1)
self.sync_blocks()
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness
# doesn't blind a node to a transaction. Transactions
# rejected for having a witness shouldn't be added
# to the rejection cache.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 100000, script_pubkey))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
tx2.rehash()
# This will be rejected due to a policy check:
# No witness is allowed, since it is not a witness program but a p2sh program
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, 'bad-witness-nonstandard')
# If we send without witness, it should be accepted.
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True)
self.nodes[0].generate(1)
self.sync_blocks()
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_non_standard_witness(self):
"""Test detection of non-standard P2WSH witness"""
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
p2wsh_scripts = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 5000000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
self.sync_blocks()
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
@subtest
def test_upgrade_after_activation(self):
"""Test the behavior of starting up a segwit-aware node after the softfork has activated."""
# Restart with the new binary
self.stop_node(2)
self.start_node(2, extra_args=["-segwitheight={}".format(SEGWIT_HEIGHT)])
connect_nodes(self.nodes[0], 2)
self.sync_blocks()
# Make sure that this peer thinks segwit has activated.
assert softfork_active(self.nodes[2], 'segwit')
# Make sure this peer's blocks match those of node0.
height = self.nodes[2].getblockcount()
start_height = height
while height > 0:
block_hash = self.nodes[2].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
block_0 = self.nodes[0].getblock(block_hash)
block_2 = self.nodes[2].getblock(block_hash)
for key in ['hash', 'confirmations', 'strippedsize', 'size', 'weight', 'height', 'version', 'versionHex', 'merkleroot', 'hashStateRoot', 'hashUTXORoot', 'tx', 'time', 'mediantime', 'nonce', 'bits', 'difficulty', 'chainwork', 'nTx', 'previousblockhash', 'nextblockhash', 'flags', 'modifier']:
if height == start_height and key == 'nextblockhash':
continue # the chain tip won't have a nextblockhash
assert_equal(block_0[key], block_2[key])
height -= 1
@subtest
def test_witness_sigops(self):
"""Test sigop counting is correct inside witnesses."""
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
sigops_per_script = 20 * 5 + 193 * 1
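# (Each bare OP_CHECKMULTISIG counts as 20 sigops (MAX_PUBKEYS_PER_MULTISIG)
# and each OP_CHECKSIG as 1, hence 20 * 5 + 193.)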
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert extra_sigops_available < 100 # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
script_pubkey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
script_pubkey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.vout[-2].scriptPubKey = script_pubkey_toomany
tx.vout[-1].scriptPubKey = script_pubkey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs - 1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count)
tx2.vout.append(CTxOut(0, script_pubkey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True)
# Reset the tip back down for the next test
self.sync_blocks()
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_superfluous_witness(self):
# Serialization of tx that puts witness flag to 3 always
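# (BIP144 layout written here: nVersion | 0x00 marker (an empty vin vector) |
# flag byte | vin | vout | witness | nLockTime; the flag is forced to 3 instead
# of the usual 1, so the node sees an unknown optional-data bit.)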
def serialize_with_bogus_witness(tx):
flags = 3
r = b""
r += struct.pack("<i", tx.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(tx.vin)
r += ser_vector(tx.vout)
if flags & 1:
if (len(tx.wit.vtxinwit) != len(tx.vin)):
# vtxinwit must have the same length as vin
tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)]
for i in range(len(tx.wit.vtxinwit), len(tx.vin)):
tx.wit.vtxinwit.append(CTxInWitness())
r += tx.wit.serialize()
r += struct.pack("<I", tx.nLockTime)
return r
class msg_bogus_tx(msg_tx):
def serialize(self):
return serialize_with_bogus_witness(self.tx)
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(address_type='bech32'), 5)
self.nodes[0].generate(1)
unspent = next(u for u in self.nodes[0].listunspent() if u['spendable'] and u['address'].startswith('qcrt'))
raw = self.nodes[0].createrawtransaction([{"txid": unspent['txid'], "vout": unspent['vout']}], {self.nodes[0].getnewaddress(): 1})
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Superfluous witness record']):
self.nodes[0].p2p.send_and_ping(msg_bogus_tx(tx))
raw = self.nodes[0].signrawtransactionwithwallet(raw)
assert raw['complete']
raw = raw['hex']
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Unknown transaction optional data']):
self.nodes[0].p2p.send_and_ping(msg_bogus_tx(tx))
if __name__ == '__main__':
SegWitTest().main()
|
the-stack_0_15121 | # -*- coding: utf-8 -*-
from .models import Team
from rest_framework import serializers
class TeamSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Team
fields = ('code', 'name', 'strength_defence_home', 'strength_attack_home',
'strength_overall_home', 'strength_attack_away', 'strength_defence_away',
'strength_overall_away')
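
# Illustrative usage sketch (not from the original file): one way this
# serializer might be exposed through a read-only viewset and router. The
# viewset and the route name "teams" are assumptions for illustration only.
from rest_framework import routers, viewsets


class TeamViewSet(viewsets.ReadOnlyModelViewSet):
    queryset = Team.objects.all()
    serializer_class = TeamSerializer


router = routers.DefaultRouter()
router.register(r'teams', TeamViewSet)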
|
the-stack_0_15123 | import pytest
from numbers import Number
from numpy import ndarray
from hypothesis import given
import hypothesis.strategies as hst
from qcodes.dataset.param_spec import ParamSpec
@pytest.fixture
def version_0_serializations():
sers = []
sers.append({'name': 'dmm_v1',
'paramtype': 'numeric',
'label': 'Gate v1',
'unit': 'V',
'inferred_from': [],
'depends_on': ['dac_ch1', 'dac_ch2']})
sers.append({'name': 'some_name',
'paramtype': 'array',
'label': 'My Array ParamSpec',
'unit': 'Ars',
'inferred_from': ['p1', 'p2' ],
'depends_on': []})
return sers
@pytest.fixture
def version_0_deserializations():
"""
The paramspecs that the above serializations should deserialize to
"""
ps = []
ps.append(ParamSpec('dmm_v1', paramtype='numeric', label='Gate v1',
unit='V', inferred_from=[],
depends_on=['dac_ch1', 'dac_ch2']))
ps.append(ParamSpec('some_name', paramtype='array',
label='My Array ParamSpec', unit='Ars',
inferred_from=['p1', 'p2'], depends_on=[]))
return ps
@given(name=hst.text(min_size=1),
sp1=hst.text(min_size=1), sp2=hst.text(min_size=1),
inff1=hst.text(min_size=1), inff2=hst.text(min_size=1),
paramtype=hst.lists(
elements=hst.sampled_from(['numeric', 'array', 'text']),
min_size=6, max_size=6))
def test_creation(name, sp1, sp2, inff1, inff2, paramtype):
invalid_types = ['np.array', 'ndarray', 'lala', '', Number,
ndarray, 0, None]
for inv_type in invalid_types:
with pytest.raises(ValueError):
ParamSpec(name, inv_type)
if not inff1.isidentifier():
inff1 = 'inff1'
if not sp1.isidentifier():
sp1 = 'sp1'
if not name.isidentifier():
with pytest.raises(ValueError):
ps = ParamSpec(name, paramtype[0], label=None, unit='V',
inferred_from=(inff1, inff2),
depends_on=(sp1, sp2))
name = 'name'
ps = ParamSpec(name, paramtype[1], label=None, unit='V',
inferred_from=(inff1, inff2),
depends_on=(sp1, sp2))
assert ps.inferred_from == f'{inff1}, {inff2}'
assert ps.depends_on == f'{sp1}, {sp2}'
ps1 = ParamSpec(sp1, paramtype[2])
p1 = ParamSpec(name, paramtype[3], depends_on=(ps1, sp2))
assert p1.depends_on == ps.depends_on
ps2 = ParamSpec(inff1, paramtype[4])
p2 = ParamSpec(name, paramtype[5], inferred_from=(ps2, inff2))
assert p2.inferred_from == ps.inferred_from
@given(name=hst.text(min_size=1))
def test_repr(name):
okay_types = ['array', 'numeric', 'text']
for okt in okay_types:
if name.isidentifier():
ps = ParamSpec(name, okt)
expected_repr = (f"ParamSpec('{name}', '{okt}', '', '', "
"inferred_from=[], depends_on=[])")
assert ps.__repr__() == expected_repr
else:
with pytest.raises(ValueError):
ps = ParamSpec(name, okt)
alphabet = "".join([chr(i) for i in range(ord("a"), ord("z"))])
@given(
name1=hst.text(min_size=4, alphabet=alphabet),
name2=hst.text(min_size=4, alphabet=alphabet),
name3=hst.text(min_size=4, alphabet=alphabet)
)
def test_add_depends_on(name1, name2, name3):
ps1 = ParamSpec(name1, "numeric")
ps2 = ParamSpec(name2, "numeric")
ps3 = ParamSpec(name3, "numeric")
ps1.add_depends_on([ps2, ps3])
assert ps1.depends_on == f"{ps2.name}, {ps3.name}"
@given(
name1=hst.text(min_size=4, alphabet=alphabet),
name2=hst.text(min_size=4, alphabet=alphabet),
name3=hst.text(min_size=4, alphabet=alphabet)
)
def test_add_inferred_from(name1, name2, name3):
ps1 = ParamSpec(name1, "numeric")
ps2 = ParamSpec(name2, "numeric")
ps3 = ParamSpec(name3, "numeric")
ps1.add_inferred_from([ps2, ps3])
assert ps1.inferred_from == f"{ps2.name}, {ps3.name}"
@given(
name1=hst.text(min_size=4, alphabet=alphabet),
name2=hst.text(min_size=4, alphabet=alphabet),
name3=hst.text(min_size=4, alphabet=alphabet),
)
def test_copy(name1, name2, name3):
ps_indep = ParamSpec(name1, "numeric")
ps = ParamSpec(name3, "numeric", depends_on=[ps_indep])
ps_copy = ps.copy()
att_names = ["name", "type", "label", "unit",
"_inferred_from", "_depends_on"]
attributes = {}
for att in att_names:
val = getattr(ps, att)
valc = getattr(ps_copy, att)
assert val == valc
attributes[att] = val
# Modifying the copy should not change the original
for att in att_names:
if not att.startswith('_'):
setattr(ps_copy, att, attributes[att] + "_modified")
else:
setattr(ps_copy, att, attributes[att] + ['bob'])
assert getattr(ps, att) == attributes[att]
def test_serialize():
p1 = ParamSpec('p1', 'numeric', 'paramspec one', 'no unit',
depends_on=['some', 'thing'], inferred_from=['bab', 'bob'])
ser = p1.serialize()
assert ser['name'] == p1.name
assert ser['paramtype'] == p1.type
assert ser['label'] == p1.label
assert ser['unit'] == p1.unit
assert ser['depends_on'] == p1._depends_on
assert ser['inferred_from'] == p1._inferred_from
def test_deserialize(version_0_serializations, version_0_deserializations):
for sdict, ps in zip(version_0_serializations, version_0_deserializations):
deps = ParamSpec.deserialize(sdict)
assert ps == deps
|
the-stack_0_15124 | #
# Copyright 2021- IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
#
import numpy as np
from scipy.sparse import issparse
from sklearn.utils.extmath import row_norms
class PHKMeansOptimizer:
def __init__(self, n_clusters, n_features, n_samples, x, x_squared_norm):
self.n_clusters = n_clusters
self.n_features = n_features
self.n_samples = n_samples
self.x = x
self.x_squared_norm = x_squared_norm
self.sparse = issparse(x)
def init_partition(self, labels, t_size, t_centroid_sum, t_centroid_avg, t_squared_norm):
sparse = issparse(self.x)
for i in range(self.n_samples):
t = labels[i]
t_size[t] += 1
if sparse:
i_start = self.x.indptr[i]
i_end = self.x.indptr[i + 1]
v_indices = self.x.indices[i_start:i_end]
v_data = self.x.data[i_start:i_end]
t_centroid_sum[t, v_indices] += v_data
else:
t_centroid_sum[t, :] += self.x[i, :]
np.multiply(t_centroid_sum, (1 / t_size)[:, None], out=t_centroid_avg)
if sparse:
t_squared_norm[:] = row_norms(t_centroid_avg, squared=True)
else:
t_squared_norm[:] = 0
# calculate inertia
inertia = 0
for i in range(self.n_samples):
t = labels[i]
if sparse:
i_start = self.x.indptr[i]
i_end = self.x.indptr[i + 1]
v_indices = self.x.indices[i_start:i_end]
v_data = self.x.data[i_start:i_end]
inertia += (t_squared_norm[t] + self.x_squared_norm[i]
- 2 * np.dot(t_centroid_avg[t, v_indices], v_data))
else:
subtraction = t_centroid_avg[t, :] - self.x[i, :]
inertia += np.dot(subtraction, subtraction)
return inertia
def optimize(self, x_permutation, t_size, t_centroid_sum, t_centroid_avg,
t_squared_norm, labels, inertia, ref_labels=None):
return self.iterate(True, self.n_samples, self.x, self.x_squared_norm,
x_permutation, t_size, t_centroid_sum, t_centroid_avg,
t_squared_norm, labels, None, inertia, ref_labels)
def infer(self, n_samples, x, x_squared_norm, t_size, t_centroid_sum,
t_centroid_avg, t_squared_norm, labels, costs, ref_labels=None):
return self.iterate(False, n_samples, x, x_squared_norm, None, t_size,
t_centroid_sum, t_centroid_avg, t_squared_norm,
labels, costs, None, ref_labels)
def iterate(self, clustering_mode, n_samples, x, x_squared_norm, x_permutation,
t_size, t_centroid_sum, t_centroid_avg, t_squared_norm,
labels, costs, inertia, ref_labels=None):
n_changes = 0
total_cost = 0
if not self.sparse:
tmp_delta = np.empty_like(t_centroid_avg)
else:
tmp_delta = None
for i in range(n_samples):
x_id = x_permutation[i] if x_permutation is not None else i
old_t = labels[x_id]
if clustering_mode and t_size[old_t] == 1:
continue # if t is a singleton cluster we do not reduce it any further
# obtain local references
if self.sparse:
x_start = x.indptr[x_id]
x_end = x.indptr[x_id + 1]
x_indices = x.indices[x_start:x_end]
x_data = x.data[x_start:x_end]
x_squared_norm_x = x_squared_norm[x_id]
else:
x_indices = None
x_data = x[x_id, :]
x_squared_norm_x = None
if clustering_mode:
# withdraw x from its current cluster
if self.sparse:
t_centroid_sum[old_t, x_indices] -= x_data
dot_product = np.dot(x_data, t_centroid_sum[old_t, x_indices])
t_squared_norm[old_t] = (t_squared_norm[old_t] * (t_size[old_t] ** 2)
- x_squared_norm_x - 2 * dot_product) / ((t_size[old_t] - 1) ** 2)
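# Writing s for the old cluster sum, x for the withdrawn row and n for the old
# cluster size, this incremental update uses
#   ||s - x||^2 = n^2 * ||s/n||^2 - ||x||^2 - 2 * x.(s - x),
# so the squared norm of the shrunken centroid (s - x)/(n - 1) is obtained
# from the old value; the symmetric form is applied below when x joins its new cluster.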
else:
t_centroid_sum[old_t, :] -= x_data
np.multiply(t_centroid_sum[old_t, :], 1 / (t_size[old_t] - 1), out=t_centroid_avg[old_t, :])
t_size[old_t] -= 1
# select new_t
if self.sparse:
dot_product = (t_centroid_sum[:, x_indices] @ x_data) / t_size
tmp_costs = t_squared_norm + x_squared_norm_x - 2 * dot_product
else:
np.subtract(t_centroid_avg, x_data[np.newaxis, :], out=tmp_delta)
tmp_costs = (tmp_delta[:, None, :] @ tmp_delta[..., None]).ravel()
tmp_costs *= t_size / (t_size + 1)
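# Scaling ||c_t - x||^2 by n_t / (n_t + 1) makes tmp_costs the exact increase
# in within-cluster sum of squares if x were assigned to cluster t, so the
# argmin below is an exact (Hartigan-style) reassignment rather than a plain
# nearest-centroid rule.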
new_t = np.argmin(tmp_costs).item()
if ref_labels is not None:
ref_t = ref_labels[x_id]
if new_t != ref_t and not np.isclose(tmp_costs[new_t], tmp_costs[ref_t]):
print("override t of cost=%.8f, with cost=%.8f" % (tmp_costs[new_t], tmp_costs[ref_t]))
new_t = ref_t
if clustering_mode:
# update membership
if self.sparse:
dot_product = np.dot(x_data, t_centroid_sum[new_t, x_indices])
t_squared_norm[new_t] = (t_squared_norm[new_t] * (t_size[new_t] ** 2)
+ x_squared_norm_x + 2 * dot_product) / ((t_size[new_t] + 1) ** 2)
t_centroid_sum[new_t, x_indices] += x_data
else:
t_centroid_sum[new_t, :] += x_data
np.multiply(t_centroid_sum[new_t, :], 1 / (t_size[new_t] + 1), out=t_centroid_avg[new_t, :])
t_size[new_t] += 1
# update stats
if new_t != old_t:
n_changes += 1
inertia -= tmp_costs[old_t] - tmp_costs[new_t]
else:
total_cost += tmp_costs[new_t]
costs[x_id, :] = tmp_costs
labels[x_id] = new_t
if clustering_mode:
return n_changes / self.n_samples if self.n_samples > 0 else 0, inertia
else:
return total_cost
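
# Illustrative usage sketch (not from the original module): a single
# optimize() pass on dense toy data. The balanced random initialization and
# the single-pass driver are assumptions made for illustration; a real caller
# would loop until the returned change rate stops improving.
def _example_single_pass(x, n_clusters, seed=0):
    rng = np.random.default_rng(seed)
    n_samples, n_features = x.shape
    opt = PHKMeansOptimizer(n_clusters, n_features, n_samples, x,
                            row_norms(x, squared=True))
    # balanced random assignment so that no cluster starts empty
    labels = rng.permutation(np.arange(n_samples) % n_clusters)
    t_size = np.zeros(n_clusters)
    t_centroid_sum = np.zeros((n_clusters, n_features))
    t_centroid_avg = np.zeros((n_clusters, n_features))
    t_squared_norm = np.zeros(n_clusters)
    inertia = opt.init_partition(labels, t_size, t_centroid_sum,
                                 t_centroid_avg, t_squared_norm)
    change_rate, inertia = opt.optimize(rng.permutation(n_samples), t_size,
                                        t_centroid_sum, t_centroid_avg,
                                        t_squared_norm, labels, inertia)
    return labels, inertia, change_rate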
|
the-stack_0_15125 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""The pty module handles pseudo-terminals.
Currently, the infrastructure here is only used to test llnl.util.tty.log.
If this is used outside a testing environment, we will want to reconsider
things like timeouts in ``ProcessController.wait()``, which are set to
get tests done quickly, not to avoid high CPU usage.
Note: The functionality in this module is unsupported on Windows
"""
from __future__ import print_function
import multiprocessing
import os
import re
import signal
import sys
import time
import traceback
import llnl.util.tty.log as log
from spack.util.executable import which
termios = None
try:
import termios as term_mod
termios = term_mod
except ImportError:
pass
class ProcessController(object):
"""Wrapper around some fundamental process control operations.
This allows one process (the controller) to drive another (the
minion) similar to the way a shell would, by sending signals and I/O.
"""
def __init__(self, pid, controller_fd,
timeout=1, sleep_time=1e-1, debug=False):
"""Create a controller to manipulate the process with id ``pid``
Args:
pid (int): id of process to control
controller_fd (int): controller fd attached to pid's stdin
timeout (int): time in seconds for wait operations to time out
(default 1 second)
sleep_time (int): time to sleep after signals, to control the
signal rate of the controller (default 1e-1)
debug (bool): whether ``horizontal_line()`` and ``status()`` should
produce output when called (default False)
``sleep_time`` allows the caller to insert delays after calls
that signal or modify the controlled process. Python behaves very
poorly if signals arrive too fast, and drowning a Python process
that has a Python signal handler in signals can kill the process and
hang our tests, so we throttle this to a closer-to-interactive rate.
"""
self.pid = pid
self.pgid = os.getpgid(pid)
self.controller_fd = controller_fd
self.timeout = timeout
self.sleep_time = sleep_time
self.debug = debug
# we need the ps command to wait for process statuses
self.ps = which("ps", required=True)
def get_canon_echo_attrs(self):
"""Get echo and canon attributes of the terminal of controller_fd."""
cfg = termios.tcgetattr(self.controller_fd)
return (
bool(cfg[3] & termios.ICANON),
bool(cfg[3] & termios.ECHO),
)
def horizontal_line(self, name):
"""Labled horizontal line for debugging."""
if self.debug:
sys.stderr.write(
"------------------------------------------- %s\n" % name
)
def status(self):
"""Print debug message with status info for the minion."""
if self.debug:
canon, echo = self.get_canon_echo_attrs()
sys.stderr.write("canon: %s, echo: %s\n" % (
"on" if canon else "off",
"on" if echo else "off",
))
sys.stderr.write("input: %s\n" % self.input_on())
sys.stderr.write("bg: %s\n" % self.background())
sys.stderr.write("\n")
def input_on(self):
"""True if keyboard input is enabled on the controller_fd pty."""
return self.get_canon_echo_attrs() == (False, False)
def background(self):
"""True if pgid is in a background pgroup of controller_fd's tty."""
return self.pgid != os.tcgetpgrp(self.controller_fd)
def tstp(self):
"""Send SIGTSTP to the controlled process."""
self.horizontal_line("tstp")
os.killpg(self.pgid, signal.SIGTSTP)
time.sleep(self.sleep_time)
def cont(self):
self.horizontal_line("cont")
os.killpg(self.pgid, signal.SIGCONT)
time.sleep(self.sleep_time)
def fg(self):
self.horizontal_line("fg")
with log.ignore_signal(signal.SIGTTOU):
os.tcsetpgrp(self.controller_fd, os.getpgid(self.pid))
time.sleep(self.sleep_time)
def bg(self):
self.horizontal_line("bg")
with log.ignore_signal(signal.SIGTTOU):
os.tcsetpgrp(self.controller_fd, os.getpgrp())
time.sleep(self.sleep_time)
def write(self, byte_string):
self.horizontal_line("write '%s'" % byte_string.decode("utf-8"))
os.write(self.controller_fd, byte_string)
def wait(self, condition):
start = time.time()
while (((time.time() - start) < self.timeout) and not condition()):
time.sleep(1e-2)
assert condition()
def wait_enabled(self):
self.wait(lambda: self.input_on() and not self.background())
def wait_disabled(self):
self.wait(lambda: not self.input_on() and self.background())
def wait_disabled_fg(self):
self.wait(lambda: not self.input_on() and not self.background())
def proc_status(self):
status = self.ps("-p", str(self.pid), "-o", "stat", output=str)
status = re.split(r"\s+", status.strip(), re.M)
return status[1]
def wait_stopped(self):
self.wait(lambda: "T" in self.proc_status())
def wait_running(self):
self.wait(lambda: "T" not in self.proc_status())
class PseudoShell(object):
"""Sets up controller and minion processes with a PTY.
You can create a ``PseudoShell`` if you want to test how some
function responds to terminal input. This is a pseudo-shell from a
job control perspective; ``controller_function`` and ``minion_function``
are set up with a pseudoterminal (pty) so that the controller can drive
the minion through process control signals and I/O.
The two functions should have signatures like this::
def controller_function(proc, ctl, **kwargs)
def minion_function(**kwargs)
``controller_function`` is spawned in its own process and passed three
arguments:
proc
the ``multiprocessing.Process`` object representing the minion
ctl
a ``ProcessController`` object tied to the minion
kwargs
keyword arguments passed from ``PseudoShell.start()``.
``minion_function`` is only passed ``kwargs`` delegated from
``PseudoShell.start()``.
The ``ctl.controller_fd`` will have its ``controller_fd`` connected to
``sys.stdin`` in the minion process. Both processes will share the
same ``sys.stdout`` and ``sys.stderr`` as the process instantiating
``PseudoShell``.
Here are the relationships between processes created::
._________________________________________________________.
| Minion Process | pid 2
| - runs minion_function | pgroup 2
|_________________________________________________________| session 1
^
| create process with controller_fd connected to stdin
| stdout, stderr are the same as caller
._________________________________________________________.
| Controller Process | pid 1
| - runs controller_function | pgroup 1
| - uses ProcessController and controller_fd to | session 1
| control minion |
|_________________________________________________________|
^
| create process
| stdin, stdout, stderr are the same as caller
._________________________________________________________.
| Caller | pid 0
| - Constructs, starts, joins PseudoShell | pgroup 0
| - provides controller_function, minion_function | session 0
|_________________________________________________________|
"""
def __init__(self, controller_function, minion_function):
self.proc = None
self.controller_function = controller_function
self.minion_function = minion_function
# these can be optionally set to change defaults
self.controller_timeout = 1
self.sleep_time = 0
def start(self, **kwargs):
"""Start the controller and minion processes.
Arguments:
kwargs (dict): arbitrary keyword arguments that will be
passed to controller and minion functions
The controller process will create the minion, then call
``controller_function``. The minion process will call
``minion_function``.
"""
self.proc = multiprocessing.Process(
target=PseudoShell._set_up_and_run_controller_function,
args=(self.controller_function, self.minion_function,
self.controller_timeout, self.sleep_time),
kwargs=kwargs,
)
self.proc.start()
def join(self):
"""Wait for the minion process to finish, and return its exit code."""
self.proc.join()
return self.proc.exitcode
@staticmethod
def _set_up_and_run_minion_function(
tty_name, stdout_fd, stderr_fd, ready, minion_function, **kwargs):
"""Minion process wrapper for PseudoShell.
Handles the mechanics of setting up a PTY, then calls
``minion_function``.
"""
# new process group, like a command or pipeline launched by a shell
os.setpgrp()
# take controlling terminal and set up pty IO
stdin_fd = os.open(tty_name, os.O_RDWR)
os.dup2(stdin_fd, sys.stdin.fileno())
os.dup2(stdout_fd, sys.stdout.fileno())
os.dup2(stderr_fd, sys.stderr.fileno())
os.close(stdin_fd)
if kwargs.get("debug"):
sys.stderr.write(
"minion: stdin.isatty(): %s\n" % sys.stdin.isatty())
# tell the parent that we're really running
if kwargs.get("debug"):
sys.stderr.write("minion: ready!\n")
ready.value = True
try:
minion_function(**kwargs)
except BaseException:
traceback.print_exc()
@staticmethod
def _set_up_and_run_controller_function(
controller_function, minion_function, controller_timeout,
sleep_time, **kwargs):
"""Set up a pty, spawn a minion process, execute controller_function.
Handles the mechanics of setting up a PTY, then calls
``controller_function``.
"""
os.setsid() # new session; this process is the controller
controller_fd, minion_fd = os.openpty()
pty_name = os.ttyname(minion_fd)
# take controlling terminal
pty_fd = os.open(pty_name, os.O_RDWR)
os.close(pty_fd)
ready = multiprocessing.Value('i', False)
minion_process = multiprocessing.Process(
target=PseudoShell._set_up_and_run_minion_function,
args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(),
ready, minion_function),
kwargs=kwargs,
)
minion_process.start()
# wait for subprocess to be running and connected.
while not ready.value:
time.sleep(1e-5)
pass
if kwargs.get("debug"):
sys.stderr.write("pid: %d\n" % os.getpid())
sys.stderr.write("pgid: %d\n" % os.getpgrp())
sys.stderr.write("sid: %d\n" % os.getsid(0))
sys.stderr.write("tcgetpgrp: %d\n" % os.tcgetpgrp(controller_fd))
sys.stderr.write("\n")
minion_pgid = os.getpgid(minion_process.pid)
sys.stderr.write("minion pid: %d\n" % minion_process.pid)
sys.stderr.write("minion pgid: %d\n" % minion_pgid)
sys.stderr.write(
"minion sid: %d\n" % os.getsid(minion_process.pid))
sys.stderr.write("\n")
sys.stderr.flush()
# set up controller to ignore SIGTSTP, like a shell
signal.signal(signal.SIGTSTP, signal.SIG_IGN)
# call the controller function once the minion is ready
try:
controller = ProcessController(
minion_process.pid, controller_fd, debug=kwargs.get("debug"))
controller.timeout = controller_timeout
controller.sleep_time = sleep_time
error = controller_function(minion_process, controller, **kwargs)
except BaseException:
error = 1
traceback.print_exc()
minion_process.join()
# return whether either the parent or minion failed
return error or minion_process.exitcode
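
# Illustrative usage sketch (not from the original module): a tiny
# controller/minion pair wired through PseudoShell. The function names here
# are invented for the example; the real callers live in the llnl.util.tty.log
# tests mentioned in the module docstring.
def _example_minion(**kwargs):
    # runs in the minion process; stdin is the pty set up by the controller
    line = sys.stdin.readline()
    sys.stdout.write("minion got: %s" % line)


def _example_controller(proc, ctl, **kwargs):
    # runs in the controller process; drive the minion through its pty
    ctl.write(b"hello\n")   # shows up on the minion's stdin
    proc.join()             # wait for the minion to exit
    return 0                # non-zero would mark the pseudo-shell run as failed


def _run_example():
    shell = PseudoShell(_example_controller, _example_minion)
    shell.start()
    return shell.join()     # exit code of the controller process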
|
the-stack_0_15128 | # MIT License
#
# Copyright (c) 2018 Mahmoud Aslan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
def cyclic_learning_rate(global_step,
learning_rate=0.01,
max_lr=0.1,
step_size=20.,
gamma=0.99994,
mode='triangular',
name=None):
"""Applies cyclic learning rate (CLR).
From the paper:
Smith, Leslie N. "Cyclical learning
rates for training neural networks." 2017.
[https://arxiv.org/pdf/1506.01186.pdf]
This method lets the learning rate cyclically
vary between reasonable boundary values
achieving improved classification accuracy and
often in fewer iterations.
This code varies the learning rate linearly between the
minimum (learning_rate) and the maximum (max_lr).
It returns the cyclic learning rate. It is computed as:
```python
cycle = floor( 1 + global_step /
( 2 * step_size ) )
x = abs( global_step / step_size - 2 * cycle + 1 )
clr = learning_rate +
( max_lr - learning_rate ) * max( 0 , 1 - x )
```
Polices:
'triangular':
Default, linearly increasing then linearly decreasing the
learning rate at each cycle.
'triangular2':
The same as the triangular policy except the learning
rate difference is cut in half at the end of each cycle.
This means the learning rate difference drops after each cycle.
'exp_range':
The learning rate varies between the minimum and maximum
boundaries and each boundary value declines by an exponential
factor of: gamma^global_step.
Example: 'triangular2' mode cyclic learning rate.
'''python
...
global_step = tf.Variable(0, trainable=False)
optimizer = tf.train.AdamOptimizer(learning_rate=
clr.cyclic_learning_rate(global_step=global_step, mode='triangular2'))
train_op = optimizer.minimize(loss_op, global_step=global_step)
...
with tf.Session() as sess:
sess.run(init)
for step in range(1, num_steps+1):
assign_op = global_step.assign(step)
sess.run(assign_op)
...
   ```
Args:
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the cyclic computation. Must not be negative.
learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate which is the lower bound
      of the cycle (default = 0.01).
max_lr: A scalar. The maximum learning rate boundary.
step_size: A scalar. The number of iterations in half a cycle.
The paper suggests step_size = 2-8 x training iterations in epoch.
gamma: constant in 'exp_range' mode:
gamma**(global_step)
mode: one of {triangular, triangular2, exp_range}.
Default 'triangular'.
Values correspond to policies detailed above.
name: String. Optional name of the operation. Defaults to
'CyclicLearningRate'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The cyclic
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns
a function which in turn returns the decayed learning
rate Tensor. This can be useful for changing the learning
rate value across different invocations of optimizer functions.
@end_compatibility
"""
if global_step is None:
raise ValueError("global_step is required for cyclic_learning_rate.")
with ops.name_scope(name, "CyclicLearningRate",
[learning_rate, global_step]) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
dtype = learning_rate.dtype
global_step = math_ops.cast(global_step, dtype)
step_size = math_ops.cast(step_size, dtype)
def cyclic_lr():
"""Helper to recompute learning rate; most helpful in eager-mode."""
# computing: cycle = floor( 1 + global_step / ( 2 * step_size ) )
double_step = math_ops.multiply(2., step_size)
global_div_double_step = math_ops.divide(global_step, double_step)
cycle = math_ops.floor(math_ops.add(1., global_div_double_step))
      # computing: x = abs( global_step / step_size - 2 * cycle + 1 )
double_cycle = math_ops.multiply(2., cycle)
global_div_step = math_ops.divide(global_step, step_size)
tmp = math_ops.subtract(global_div_step, double_cycle)
x = math_ops.abs(math_ops.add(1., tmp))
      # computing: clr = learning_rate + ( max_lr - learning_rate ) * max( 0, 1 - x )
a1 = math_ops.maximum(0., math_ops.subtract(1., x))
a2 = math_ops.subtract(max_lr, learning_rate)
clr = math_ops.multiply(a1, a2)
if mode == 'triangular2':
clr = math_ops.divide(clr, math_ops.cast(math_ops.pow(2, math_ops.cast(
cycle - 1, tf.int32)), tf.float32))
if mode == 'exp_range':
clr = math_ops.multiply(math_ops.pow(gamma, global_step), clr)
return math_ops.add(clr, learning_rate, name=name)
if not context.executing_eagerly():
cyclic_lr = cyclic_lr()
return cyclic_lr
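# --- Illustrative sketch (added for exposition; not part of the original API) ---
# A dependency-free restatement of the 'triangular' policy documented above,
# handy for sanity-checking values without building a TensorFlow graph. The
# helper name `_triangular_clr_reference` is ours, not TensorFlow's.
def _triangular_clr_reference(step, learning_rate=0.01, max_lr=0.1, step_size=20.):
  import math
  cycle = math.floor(1. + step / (2. * step_size))
  x = abs(step / step_size - 2. * cycle + 1.)
  return learning_rate + (max_lr - learning_rate) * max(0., 1. - x)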
|
the-stack_0_15129 | from unittest import TestCase
from mock import Mock, patch
from data import FlixsterMovieDetails, Actor, RottenTomatoesMovieDetails, Movie
from data.parsers.movies import get_flixster_movie_details, get_rotten_tomatoes_movie_details, parse_actors, \
parse_release_date, parse_trailer_url, parse_flixster_movie_details, parse_actor, parse_rotten_tomatoes_movie_details, \
parse_movie
class TestGetFlixsterDetails(TestCase):
def test_returns_none_for_missing_details(self):
self.assertIsNone(get_flixster_movie_details({}))
@patch("data.parsers.movies.parse_flixster_movie_details")
def test_returns_parsed_flixster_movie_details(self, mocked_details_parser):
details = "details"
mocked_details_parser.return_value = details
self.assertEqual(details, get_flixster_movie_details({"flixster": "foo"}))
class TestGetRottenTomatoesMovieDetails(TestCase):
def test_returns_none_for_missing_details(self):
self.assertIsNone(get_rotten_tomatoes_movie_details({}))
@patch("data.parsers.movies.parse_rotten_tomatoes_movie_details")
def test_returns_parsed_rotten_tomatoes_movie_details(self, mocked_details_parser):
details = "details"
mocked_details_parser.return_value = details
self.assertEqual(details, get_rotten_tomatoes_movie_details({"rottenTomatoes": "foo"}))
class TestGetReleaseDate(TestCase):
def test_returns_none_for_empty_release_date(self):
self.assertIsNone(parse_release_date(""))
@patch("dateutil.parser.parse")
def test_returns_parsed_date(self, mocked_date_parser):
parsed_date = "parsed date"
mocked_date_parser.return_value = parsed_date
self.assertEqual(parsed_date, parse_release_date("foo"))
class TestGetActors(TestCase):
@patch("data.parsers.movies.parse_actor")
def test_returns_actors(self, mocked_actors_parser):
parsed_actor = "parsed actor"
mocked_actors_parser.return_value = parsed_actor
expected = [parsed_actor, parsed_actor]
self.assertEqual(expected, parse_actors([1, 2]))
class TestGetTrailerUrl(TestCase):
def test_returns_none_for_empty_hd_trailer(self):
self.assertIsNone(parse_trailer_url({}))
def test_returns_hd_trailer(self):
self.assertEqual("foo", parse_trailer_url({"hd": "foo"}))
class TestParseFlixsterMovieDetails(TestCase):
average = "average"
not_interested_count = "not interested count"
likability_score = "likability score"
scores_count = "scores count"
want_to_see_count = "want to see count"
popcorn_score = "popcorn score"
movie_details = {
"average": average,
"numNotInterested": not_interested_count,
"likeability": likability_score,
"numScores": scores_count,
"numWantToSee": want_to_see_count,
"popcornScore": popcorn_score
}
expected = FlixsterMovieDetails(average_rating=average, not_interested_count=not_interested_count,
likability_score=likability_score, scores_count=scores_count,
want_to_see_count=want_to_see_count, popcorn_score=popcorn_score)
def test_parses_successfully(self):
self.assertEqual(self.expected, parse_flixster_movie_details(self.movie_details))
class TestParseActor(TestCase):
id = "id"
name = "name"
url = "url"
actor_details = {
"id": id,
"name": name,
"url": url
}
expected = Actor(fid=id, name=name, url=url)
def test_parses_successfully(self):
self.assertEqual(self.expected, parse_actor(self.actor_details))
class TestParseRottenTomatoesMovieDetails(TestCase):
rating = "rating"
is_certified_fresh = "certified fresh"
consensus = "consensus"
movie_details = {
"rating": rating,
"certifiedFresh": is_certified_fresh,
"consensus": consensus
}
expected = RottenTomatoesMovieDetails(rating=rating, is_certified_fresh=is_certified_fresh, consensus=consensus)
@patch("data.parsers.movies.clean_html")
def test_parses_successfully(self, mocked_html_cleaner):
mocked_html_cleaner.return_value = self.consensus
self.assertEqual(self.expected, parse_rotten_tomatoes_movie_details(self.movie_details))
class TestParseMovie(TestCase):
id = "id"
release_date = "release date"
title = "title"
mpaa_rating = "mpaa rating"
run_time = "run time"
is_live = "is live"
is_opening = "is opening"
trailer_url = "trailer url"
actors = "actors"
flixster_movie_details = "flixster movie details"
rotten_tomatoes_movie_details = "rotten tomatoes movie details"
reviews = "reviews"
movie_details = {
"id": id,
"releaseDate": release_date,
"title": title,
"mpaa": mpaa_rating,
"runningTime": run_time,
"isLive": is_live,
"isOpening": is_opening,
"trailer": trailer_url,
"actors": actors,
"reviews": reviews
}
@patch("data.parsers.movies.parse_release_date")
@patch("data.parsers.movies.parse_trailer_url")
@patch("data.parsers.movies.parse_actors")
@patch("data.parsers.movies.get_flixster_movie_details")
@patch("data.parsers.movies.get_rotten_tomatoes_movie_details")
def test_parses_successfully(self, mocked_rotten_tomatoes_movie_details, mocked_flixster_movie_details, mocked_actors, mocked_trailer_url, mocked_release_date):
rotten_tomatoes_movie_details = "mocked rotten tomatoes movie details"
flixster_movie_details = "mocked flixster movie details"
actors = "mocked actors"
trailer_url = "mocked trailer url"
release_date = "mocked release date"
mocked_rotten_tomatoes_movie_details.return_value = rotten_tomatoes_movie_details
mocked_flixster_movie_details.return_value = flixster_movie_details
mocked_actors.return_value = actors
mocked_trailer_url.return_value = trailer_url
mocked_release_date.return_value = release_date
expected = Movie(fid=self.id, release_date=release_date, title=self.title, mpaa_rating=self.mpaa_rating,
run_time=self.run_time, is_live=self.is_live, is_opening=self.is_opening, trailer_url=trailer_url,
actors=actors, flixster_movie_details=flixster_movie_details, rotten_tomatoes_movie_details=rotten_tomatoes_movie_details)
self.assertEqual(expected, parse_movie(self.movie_details)) |
the-stack_0_15130 | # -*- coding=utf -*-
from __future__ import absolute_import
from cubes.browser import *
from cubes.errors import *
from cubes.model import *
from .store import DEFAULT_TIME_HIERARCHY
from .utils import *
from collections import defaultdict
from datetime import datetime
import pytz
class _MixpanelResponseAggregator(object):
def __init__(self, browser, responses, aggregate_names, drilldown, split,
actual_time_level):
"""Aggregator for multiple mixpanel responses (multiple dimensions)
with drill-down post-aggregation.
Arguments:
* `browser` – owning browser
        * `responses` – mixpanel responses by `aggregate_names`
* `aggregate_names` – list of collected measures
* `drilldown` – a `Drilldown` object from the browser aggregation
query
* `split` - a split Cell object from the browser aggregation query
Object attributes:
* `aggregate_names` – list of measure names from the response
* `aggregate_data` – a dictionary where keys are measure names and
values are actual data points.
* `time_cells` – an ordered dictionary of collected cells from the
response. Key is time path, value is cell contents without the time
dimension.
"""
self.browser = browser
self.logger = browser.logger
self.drilldown = drilldown
self.aggregate_names = aggregate_names
self.actual_time_level = actual_time_level
# Extract the data
self.aggregate_data = {}
for aggregate in aggregate_names:
            self.aggregate_data[aggregate] = responses[aggregate]["data"]["values"]
# Get time drilldown levels, if we are drilling through time
time_drilldowns = drilldown.drilldown_for_dimension("time")
if time_drilldowns:
time_drilldown = time_drilldowns[0]
self.last_time_level = str(time_drilldown.levels[-1])
self.time_levels = ["time."+str(l) for l in time_drilldown.levels]
self.time_hierarchy = str(time_drilldown.hierarchy)
else:
time_drilldown = None
self.last_time_level = None
self.time_levels = []
self.time_hierarchy = DEFAULT_TIME_HIERARCHY
self.drilldown_on = None
for obj in drilldown:
if obj.dimension.name != "time":
# this is a DrilldownItem object. represent it as 'dim.level' or just 'dim' if flat
self.drilldown_on = ( "%s.%s" % (obj.dimension.name, obj.levels[-1].name) ) if ( not obj.dimension.is_flat ) else obj.dimension.name
self.drilldown_on_value_func = lambda x: x
if self.drilldown_on is None and split:
self.drilldown_on = SPLIT_DIMENSION_NAME
self.drilldown_on_value_func = lambda x: True if x == "true" else False
# Time-keyed cells:
# (time_path, group) -> dictionary
self.time_cells = {}
self.cells = []
# Do it:
#
# Collect, Map&Reduce, Order
# ==========================
#
# Process the response. The methods are operating on the instance
# variable `time_cells`
self._collect_cells()
# TODO: handle week
if actual_time_level != self.last_time_level:
self._reduce_cells()
self._finalize_cells()
# Result is stored in the `cells` instance variable.
def _collect_cells(self):
for aggregate in self.aggregate_names:
self._collect_aggregate_cells(aggregate)
def _collect_aggregate_cells(self, aggregate):
"""Collects the cells from the response in a time series dictionary
`time_cells` where keys are tuples: `(time_path, group)`. `group` is
drill-down key value for the cell, such as `New York` for `city`."""
# Note: For no-drilldown this would be only one pass and group will be
# a cube name
# TODO: To add multiple drill-down dimensions in the future, add them
# to the `group` part of the key tuple
        for group_key, group_series in self.aggregate_data[aggregate].items():
for time_key, value in group_series.items():
time_path = time_to_path(time_key, self.last_time_level,
self.time_hierarchy)
key = (time_path, group_key)
# self.logger.debug("adding cell %s" % (key, ))
cell = self.time_cells.setdefault(key, {})
cell[aggregate] = value
# FIXME: do this only on drilldown
if self.drilldown_on:
cell[self.drilldown_on] = group_key
def _reduce_cells(self):
"""Reduce the cells according to the time dimensions."""
def reduce_cell(result, cell):
            # We assume only _sum aggregation
            # All measures should be prepared so we can do this
for aggregate in self.aggregate_names:
result[aggregate] = result.get(aggregate, 0) + \
cell.get(aggregate, 0)
return result
# 1. Map cells to reduced time path
#
reduced_map = defaultdict(list)
reduced_len = len(self.time_levels)
for key, cell in self.time_cells.items():
time_path = key[0]
reduced_path = time_path[0:reduced_len]
reduced_key = (reduced_path, key[1])
# self.logger.debug("reducing %s -> %s" % (key, reduced_key))
reduced_map[reduced_key].append(cell)
self.browser.logger.debug("response cell count: %s reduced to: %s" %
(len(self.time_cells), len(reduced_map)))
# 2. Reduce the cells
#
# See the function reduce_cell() above for aggregation:
#
reduced_cells = {}
for key, cells in reduced_map.items():
# self.browser.logger.debug("Reducing: %s -> %s" % (key, cells))
cell = reduce(reduce_cell, cells, {})
reduced_cells[key] = cell
self.time_cells = reduced_cells
def _finalize_cells(self):
"""Orders the `time_cells` according to the time and "the other"
dimension and puts the result into the `cells` instance variable.
This method also adds the time dimension keys."""
# Order by time (as path) and then drilldown dimension value (group)
# The key[0] is a list of paths: time, another_drilldown
order = lambda left, right: cmp(left[0], right[0])
cells = self.time_cells.items()
cells.sort(order)
# compute the current datetime, convert to path
current_time_path = time_to_path(
pytz.timezone('UTC').localize(datetime.utcnow()).astimezone(self.browser.timezone).strftime("%Y-%m-%d %H:00:00"),
self.last_time_level,
self.time_hierarchy)
self.cells = []
for key, cell in cells:
# If we are aggregating at finer granularity than "all":
time_key = key[0]
if time_key:
# if time_key ahead of current time path, discard
if time_key > current_time_path:
continue
cell.update(zip(self.time_levels, time_key))
# append the drilldown_on attribute ref
if self.drilldown_on:
cell[self.drilldown_on] = self.drilldown_on_value_func(key[1])
self.cells.append(cell)
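# --- Illustrative sketch (added for exposition; not part of the original module) ---
# A dependency-free illustration of the map & reduce step performed by
# _reduce_cells() above: cells keyed by (time_path, group) are re-keyed by a
# truncated time path and their aggregates are summed. The helper name and the
# assumption that cells hold only numeric aggregates are ours.
def _example_reduce_time_cells(time_cells, reduced_len):
    reduced = {}
    for (time_path, group), cell in time_cells.items():
        key = (time_path[:reduced_len], group)
        target = reduced.setdefault(key, {})
        for aggregate, value in cell.items():
            target[aggregate] = target.get(aggregate, 0) + value
    return reduced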
|
the-stack_0_15131 | # -*- coding: utf-8 -*-
"""Test that models can be executed."""
import importlib
import os
import unittest
from typing import Optional
import numpy
import torch
import pykeen.experiments
import pykeen.models
from pykeen.models import (
ERModel, EntityEmbeddingModel, EntityRelationEmbeddingModel, Model, MultimodalModel, _MODELS,
_NewAbstractModel, _OldAbstractModel, model_resolver,
)
from pykeen.models.predict import get_novelty_mask, predict
from pykeen.models.unimodal.trans_d import _project_entity
from pykeen.nn import Embedding
from pykeen.utils import all_in_bounds, clamp_norm, extend_batch
from tests import cases
from tests.constants import EPSILON
SKIP_MODULES = {
Model.__name__,
_OldAbstractModel.__name__,
_NewAbstractModel.__name__,
'DummyModel',
MultimodalModel.__name__,
EntityEmbeddingModel.__name__,
EntityRelationEmbeddingModel.__name__,
ERModel.__name__,
'MockModel',
'SimpleInteractionModel',
}
for cls in MultimodalModel.__subclasses__():
SKIP_MODULES.add(cls.__name__)
class TestComplex(cases.ModelTestCase):
"""Test the ComplEx model."""
model_cls = pykeen.models.ComplEx
class TestConvE(cases.ModelTestCase):
"""Test the ConvE model."""
model_cls = pykeen.models.ConvE
embedding_dim = 12
create_inverse_triples = True
model_kwargs = {
'output_channels': 2,
'embedding_height': 3,
'embedding_width': 4,
}
# 3x batch norm: bias + scale --> 6
# entity specific bias --> 1
# ==================================
# 7
num_constant_init = 7
class TestConvKB(cases.ModelTestCase):
"""Test the ConvKB model."""
model_cls = pykeen.models.ConvKB
model_kwargs = {
'num_filters': 2,
}
# two bias terms, one conv-filter
num_constant_init = 3
class TestDistMult(cases.ModelTestCase):
"""Test the DistMult model."""
model_cls = pykeen.models.DistMult
def _check_constraints(self):
"""Check model constraints.
Entity embeddings have to have unit L2 norm.
"""
entity_norms = self.model.entity_embeddings(indices=None).norm(p=2, dim=-1)
assert torch.allclose(entity_norms, torch.ones_like(entity_norms))
def _test_score_all_triples(self, k: Optional[int], batch_size: int = 16):
"""Test score_all_triples.
:param k: The number of triples to return. Set to None, to keep all.
:param batch_size: The batch size to use for calculating scores.
"""
top_triples, top_scores = predict(model=self.model, batch_size=batch_size, k=k)
# check type
assert torch.is_tensor(top_triples)
assert torch.is_tensor(top_scores)
assert top_triples.dtype == torch.long
assert top_scores.dtype == torch.float32
# check shape
actual_k, n_cols = top_triples.shape
assert n_cols == 3
if k is None:
assert actual_k == self.factory.num_entities ** 2 * self.factory.num_relations
else:
assert actual_k == min(k, self.factory.num_triples)
assert top_scores.shape == (actual_k,)
# check ID ranges
assert (top_triples >= 0).all()
assert top_triples[:, [0, 2]].max() < self.model.num_entities
assert top_triples[:, 1].max() < self.model.num_relations
def test_score_all_triples(self):
"""Test score_all_triples with a large batch size."""
# this is only done in one of the models
self._test_score_all_triples(k=15, batch_size=16)
def test_score_all_triples_singleton_batch(self):
"""Test score_all_triples with a batch size of 1."""
self._test_score_all_triples(k=15, batch_size=1)
def test_score_all_triples_large_batch(self):
"""Test score_all_triples with a batch size larger than k."""
self._test_score_all_triples(k=10, batch_size=16)
def test_score_all_triples_keep_all(self):
"""Test score_all_triples with k=None."""
# this is only done in one of the models
self._test_score_all_triples(k=None)
class TestERMLP(cases.ModelTestCase):
"""Test the ERMLP model."""
model_cls = pykeen.models.ERMLP
model_kwargs = {
'hidden_dim': 4,
}
# Two linear layer biases
num_constant_init = 2
class TestERMLPE(cases.ModelTestCase):
"""Test the extended ERMLP model."""
model_cls = pykeen.models.ERMLPE
model_kwargs = {
'hidden_dim': 4,
}
# Two BN layers, bias & scale
num_constant_init = 4
class TestHolE(cases.ModelTestCase):
"""Test the HolE model."""
model_cls = pykeen.models.HolE
def _check_constraints(self):
"""Check model constraints.
Entity embeddings have to have at most unit L2 norm.
"""
assert all_in_bounds(self.model.entity_embeddings(indices=None).norm(p=2, dim=-1), high=1., a_tol=EPSILON)
class TestKG2EWithKL(cases.BaseKG2ETest):
"""Test the KG2E model with KL similarity."""
model_kwargs = {
'dist_similarity': 'KL',
}
class TestMuRE(cases.ModelTestCase):
"""Test the MuRE model."""
model_cls = pykeen.models.MuRE
num_constant_init = 2 # biases
class TestKG2EWithEL(cases.BaseKG2ETest):
"""Test the KG2E model with EL similarity."""
model_kwargs = {
'dist_similarity': 'EL',
}
class TestNTNLowMemory(cases.BaseNTNTest):
"""Test the NTN model with automatic memory optimization."""
model_kwargs = {
'num_slices': 2,
}
training_loop_kwargs = {
'automatic_memory_optimization': True,
}
class TestNTNHighMemory(cases.BaseNTNTest):
"""Test the NTN model without automatic memory optimization."""
model_kwargs = {
'num_slices': 2,
}
training_loop_kwargs = {
'automatic_memory_optimization': False,
}
class TestProjE(cases.ModelTestCase):
"""Test the ProjE model."""
model_cls = pykeen.models.ProjE
class TestRESCAL(cases.ModelTestCase):
"""Test the RESCAL model."""
model_cls = pykeen.models.RESCAL
class TestRGCNBasis(cases.BaseRGCNTest):
"""Test the R-GCN model."""
model_kwargs = {
'interaction': "transe",
'interaction_kwargs': dict(p=1),
'decomposition': "bases",
"decomposition_kwargs": dict(
num_bases=3,
),
}
#: one bias per layer
num_constant_init = 2
class TestRGCNBlock(cases.BaseRGCNTest):
"""Test the R-GCN model with block decomposition."""
embedding_dim = 6
model_kwargs = {
'interaction': "distmult",
'decomposition': "block",
"decomposition_kwargs": dict(
num_blocks=3,
),
'edge_weighting': "symmetric",
'use_batch_norm': True,
}
#: (scale & bias for BN) * layers
num_constant_init = 4
class TestRotatE(cases.ModelTestCase):
"""Test the RotatE model."""
model_cls = pykeen.models.RotatE
def _check_constraints(self):
"""Check model constraints.
Relation embeddings' entries have to have absolute value 1 (i.e. represent a rotation in complex plane)
"""
relation_abs = (
self.model
.relation_embeddings(indices=None)
.view(self.factory.num_relations, -1, 2)
.norm(p=2, dim=-1)
)
assert torch.allclose(relation_abs, torch.ones_like(relation_abs))
class TestSimplE(cases.ModelTestCase):
"""Test the SimplE model."""
model_cls = pykeen.models.SimplE
class _BaseTestSE(cases.ModelTestCase):
"""Test the Structured Embedding model."""
model_cls = pykeen.models.StructuredEmbedding
def _check_constraints(self):
"""Check model constraints.
Entity embeddings have to have unit L2 norm.
"""
norms = self.model.entity_embeddings(indices=None).norm(p=2, dim=-1)
assert torch.allclose(norms, torch.ones_like(norms))
class TestSELowMemory(_BaseTestSE):
"""Tests SE with low memory."""
training_loop_kwargs = {
'automatic_memory_optimization': True,
}
class TestSEHighMemory(_BaseTestSE):
"""Tests SE with low memory."""
training_loop_kwargs = {
'automatic_memory_optimization': False,
}
class TestTransD(cases.DistanceModelTestCase):
"""Test the TransD model."""
model_cls = pykeen.models.TransD
model_kwargs = {
'relation_dim': 4,
}
def _check_constraints(self):
"""Check model constraints.
Entity and relation embeddings have to have at most unit L2 norm.
"""
for emb in (self.model.entity_embeddings, self.model.relation_embeddings):
assert all_in_bounds(emb(indices=None).norm(p=2, dim=-1), high=1., a_tol=EPSILON)
def test_score_hrt_manual(self):
"""Manually test interaction function of TransD."""
# entity embeddings
weights = torch.as_tensor(data=[[2., 2.], [4., 4.]], dtype=torch.float)
entity_embeddings = Embedding(
num_embeddings=2,
embedding_dim=2,
)
entity_embeddings._embeddings.weight.data.copy_(weights)
self.model.entity_embeddings = entity_embeddings
projection_weights = torch.as_tensor(data=[[3., 3.], [2., 2.]], dtype=torch.float)
entity_projection_embeddings = Embedding(
num_embeddings=2,
embedding_dim=2,
)
entity_projection_embeddings._embeddings.weight.data.copy_(projection_weights)
self.model.entity_projections = entity_projection_embeddings
# relation embeddings
relation_weights = torch.as_tensor(data=[[4.], [4.]], dtype=torch.float)
relation_embeddings = Embedding(
num_embeddings=2,
embedding_dim=1,
)
relation_embeddings._embeddings.weight.data.copy_(relation_weights)
self.model.relation_embeddings = relation_embeddings
relation_projection_weights = torch.as_tensor(data=[[5.], [3.]], dtype=torch.float)
relation_projection_embeddings = Embedding(
num_embeddings=2,
embedding_dim=1,
)
relation_projection_embeddings._embeddings.weight.data.copy_(relation_projection_weights)
self.model.relation_projections = relation_projection_embeddings
# Compute Scores
batch = torch.as_tensor(data=[[0, 0, 0], [0, 0, 1]], dtype=torch.long)
scores = self.model.score_hrt(hrt_batch=batch)
self.assertEqual(scores.shape[0], 2)
self.assertEqual(scores.shape[1], 1)
first_score = scores[0].item()
self.assertAlmostEqual(first_score, -16, delta=0.01)
# Use different dimension for relation embedding: relation_dim > entity_dim
# relation embeddings
relation_weights = torch.as_tensor(data=[[3., 3., 3.], [3., 3., 3.]], dtype=torch.float)
relation_embeddings = Embedding(
num_embeddings=2,
embedding_dim=3,
)
relation_embeddings._embeddings.weight.data.copy_(relation_weights)
self.model.relation_embeddings = relation_embeddings
relation_projection_weights = torch.as_tensor(data=[[4., 4., 4.], [4., 4., 4.]], dtype=torch.float)
relation_projection_embeddings = Embedding(
num_embeddings=2,
embedding_dim=3,
)
relation_projection_embeddings._embeddings.weight.data.copy_(relation_projection_weights)
self.model.relation_projections = relation_projection_embeddings
# Compute Scores
batch = torch.as_tensor(data=[[0, 0, 0]], dtype=torch.long)
scores = self.model.score_hrt(hrt_batch=batch)
self.assertAlmostEqual(scores.item(), -27, delta=0.01)
batch = torch.as_tensor(data=[[0, 0, 0], [0, 0, 0]], dtype=torch.long)
scores = self.model.score_hrt(hrt_batch=batch)
self.assertEqual(scores.shape[0], 2)
self.assertEqual(scores.shape[1], 1)
first_score = scores[0].item()
second_score = scores[1].item()
self.assertAlmostEqual(first_score, -27, delta=0.01)
self.assertAlmostEqual(second_score, -27, delta=0.01)
# Use different dimension for relation embedding: relation_dim < entity_dim
# entity embeddings
weights = torch.as_tensor(data=[[1., 1., 1.], [1., 1., 1.]], dtype=torch.float)
entity_embeddings = Embedding(
num_embeddings=2,
embedding_dim=3,
)
entity_embeddings._embeddings.weight.data.copy_(weights)
self.model.entity_embeddings = entity_embeddings
projection_weights = torch.as_tensor(data=[[2., 2., 2.], [2., 2., 2.]], dtype=torch.float)
entity_projection_embeddings = Embedding(
num_embeddings=2,
embedding_dim=3,
)
entity_projection_embeddings._embeddings.weight.data.copy_(projection_weights)
self.model.entity_projections = entity_projection_embeddings
# relation embeddings
relation_weights = torch.as_tensor(data=[[3., 3.], [3., 3.]], dtype=torch.float)
relation_embeddings = Embedding(
num_embeddings=2,
embedding_dim=2,
)
relation_embeddings._embeddings.weight.data.copy_(relation_weights)
self.model.relation_embeddings = relation_embeddings
relation_projection_weights = torch.as_tensor(data=[[4., 4.], [4., 4.]], dtype=torch.float)
relation_projection_embeddings = Embedding(
num_embeddings=2,
embedding_dim=2,
)
relation_projection_embeddings._embeddings.weight.data.copy_(relation_projection_weights)
self.model.relation_projections = relation_projection_embeddings
# Compute Scores
batch = torch.as_tensor(data=[[0, 0, 0], [0, 0, 0]], dtype=torch.long)
scores = self.model.score_hrt(hrt_batch=batch)
self.assertEqual(scores.shape[0], 2)
self.assertEqual(scores.shape[1], 1)
first_score = scores[0].item()
second_score = scores[1].item()
self.assertAlmostEqual(first_score, -18, delta=0.01)
self.assertAlmostEqual(second_score, -18, delta=0.01)
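    # Note (added for exposition): TransD projects an entity e with projection
    # vector e_p under a relation projection r_p via e_bot = (r_p e_p^T + I~) e,
    # where I~ is a (possibly rectangular) identity matrix; the test below checks
    # the projected shape and that the result is clamped to at most unit L2 norm.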
def test_project_entity(self):
"""Test _project_entity."""
# random entity embeddings & projections
e = torch.rand(1, self.model.num_entities, self.embedding_dim, generator=self.generator)
e = clamp_norm(e, maxnorm=1, p=2, dim=-1)
e_p = torch.rand(1, self.model.num_entities, self.embedding_dim, generator=self.generator)
# random relation embeddings & projections
r = torch.rand(self.batch_size, 1, self.model.relation_dim, generator=self.generator)
r = clamp_norm(r, maxnorm=1, p=2, dim=-1)
r_p = torch.rand(self.batch_size, 1, self.model.relation_dim, generator=self.generator)
# project
e_bot = _project_entity(e=e, e_p=e_p, r=r, r_p=r_p)
# check shape:
assert e_bot.shape == (self.batch_size, self.model.num_entities, self.model.relation_dim)
# check normalization
assert (torch.norm(e_bot, dim=-1, p=2) <= 1.0 + 1.0e-06).all()
class TestTransE(cases.DistanceModelTestCase):
"""Test the TransE model."""
model_cls = pykeen.models.TransE
def _check_constraints(self):
"""Check model constraints.
Entity embeddings have to have unit L2 norm.
"""
entity_norms = self.model.entity_embeddings(indices=None).norm(p=2, dim=-1)
assert torch.allclose(entity_norms, torch.ones_like(entity_norms))
class TestTransH(cases.DistanceModelTestCase):
"""Test the TransH model."""
model_cls = pykeen.models.TransH
def _check_constraints(self):
"""Check model constraints.
        Relation normal vectors have to have unit L2 norm.
        """
        norms = self.model.normal_vector_embeddings(indices=None).norm(p=2, dim=-1)
        assert torch.allclose(norms, torch.ones_like(norms))
class TestTransR(cases.DistanceModelTestCase):
"""Test the TransR model."""
model_cls = pykeen.models.TransR
model_kwargs = {
'relation_dim': 4,
}
def test_score_hrt_manual(self):
"""Manually test interaction function of TransR."""
# entity embeddings
weights = torch.as_tensor(data=[[2., 2.], [3., 3.]], dtype=torch.float)
entity_embeddings = Embedding(
num_embeddings=2,
embedding_dim=2,
)
entity_embeddings._embeddings.weight.data.copy_(weights)
self.model.entity_embeddings = entity_embeddings
# relation embeddings
relation_weights = torch.as_tensor(data=[[4., 4], [5., 5.]], dtype=torch.float)
relation_embeddings = Embedding(
num_embeddings=2,
embedding_dim=2,
)
relation_embeddings._embeddings.weight.data.copy_(relation_weights)
self.model.relation_embeddings = relation_embeddings
relation_projection_weights = torch.as_tensor(data=[[5., 5., 6., 6.], [7., 7., 8., 8.]], dtype=torch.float)
relation_projection_embeddings = Embedding(
num_embeddings=2,
embedding_dim=4,
)
relation_projection_embeddings._embeddings.weight.data.copy_(relation_projection_weights)
self.model.relation_projections = relation_projection_embeddings
# Compute Scores
batch = torch.as_tensor(data=[[0, 0, 0], [0, 0, 1]], dtype=torch.long)
scores = self.model.score_hrt(hrt_batch=batch)
self.assertEqual(scores.shape[0], 2)
self.assertEqual(scores.shape[1], 1)
first_score = scores[0].item()
# second_score = scores[1].item()
self.assertAlmostEqual(first_score, -32, delta=0.01)
def _check_constraints(self):
"""Check model constraints.
Entity and relation embeddings have to have at most unit L2 norm.
"""
for emb in (self.model.entity_embeddings, self.model.relation_embeddings):
assert all_in_bounds(emb(indices=None).norm(p=2, dim=-1), high=1., a_tol=1.0e-06)
class TestTuckEr(cases.ModelTestCase):
"""Test the TuckEr model."""
model_cls = pykeen.models.TuckER
model_kwargs = {
'relation_dim': 4,
}
#: 2xBN (bias & scale)
num_constant_init = 4
class TestUM(cases.DistanceModelTestCase):
"""Test the Unstructured Model."""
model_cls = pykeen.models.UnstructuredModel
class TestTesting(unittest.TestCase):
"""Yo dawg, I heard you like testing, so I wrote a test to test the tests so you can test while you're testing."""
def test_documentation(self):
"""Test all models have appropriate structured documentation."""
for name, model_cls in sorted(model_resolver.lookup_dict.items()):
with self.subTest(name=name):
try:
docdata = model_cls.__docdata__
except AttributeError:
self.fail('missing __docdata__')
self.assertIn('citation', docdata)
self.assertIn('author', docdata['citation'])
self.assertIn('link', docdata['citation'])
self.assertIn('year', docdata['citation'])
def test_testing(self):
"""Check that there's a test for all models.
        For now, this is excluding multimodal models. Not sure how to test those yet.
"""
model_names = {
model_cls.__name__
for model_cls in model_resolver.lookup_dict.values()
if not issubclass(model_cls, ERModel)
}
model_names -= SKIP_MODULES
tested_model_names = {
value.model_cls.__name__
for name, value in globals().items()
if (
isinstance(value, type)
and issubclass(value, cases.ModelTestCase)
and not name.startswith('_')
and not issubclass(value.model_cls, (ERModel, MultimodalModel))
)
}
tested_model_names -= SKIP_MODULES
self.assertEqual(model_names, tested_model_names, msg='Some models have not been tested')
def test_importing(self):
"""Test that all models are available from :mod:`pykeen.models`."""
models_path = os.path.abspath(os.path.dirname(pykeen.models.__file__))
model_names = set()
for directory, _, filenames in os.walk(models_path):
for filename in filenames:
if not filename.endswith('.py'):
continue
path = os.path.join(directory, filename)
relpath = os.path.relpath(path, models_path)
if relpath.endswith('__init__.py'):
continue
import_path = 'pykeen.models.' + relpath[:-len('.py')].replace(os.sep, '.')
module = importlib.import_module(import_path)
for name in dir(module):
value = getattr(module, name)
if (
isinstance(value, type)
and issubclass(value, Model)
):
model_names.add(value.__name__)
star_model_names = _remove_non_models(set(pykeen.models.__all__) - SKIP_MODULES)
model_names = _remove_non_models(model_names - SKIP_MODULES)
self.assertEqual(model_names, star_model_names, msg='Forgot to add some imports')
def test_models_have_experiments(self):
"""Test that each model has an experiment folder in :mod:`pykeen.experiments`."""
experiments_path = os.path.abspath(os.path.dirname(pykeen.experiments.__file__))
experiment_blacklist = {
'DistMultLiteral', # FIXME
'ComplExLiteral', # FIXME
'UnstructuredModel',
'StructuredEmbedding',
'RESCAL',
'NTN',
'ERMLP',
'ProjE', # FIXME
'ERMLPE', # FIXME
'PairRE',
}
model_names = _remove_non_models(set(pykeen.models.__all__) - SKIP_MODULES - experiment_blacklist)
for model in _remove_non_models(model_names):
with self.subTest(model=model):
self.assertTrue(
os.path.exists(os.path.join(experiments_path, model.lower())),
msg=f'Missing experimental configuration for {model}',
)
def _remove_non_models(elements):
rv = set()
for element in elements:
try:
model_resolver.lookup(element)
except ValueError: # invalid model name - aka not actually a model
continue
else:
rv.add(element)
return rv
class TestModelUtilities(unittest.TestCase):
"""Extra tests for utility functions."""
def test_abstract(self):
"""Test that classes are checked as abstract properly."""
self.assertTrue(EntityEmbeddingModel._is_base_model)
self.assertTrue(EntityRelationEmbeddingModel._is_base_model)
self.assertTrue(MultimodalModel._is_base_model)
for model_cls in _MODELS:
self.assertFalse(
model_cls._is_base_model,
                msg=f'{model_cls.__name__} should not be marked as a base model',
)
def test_get_novelty_mask(self):
"""Test `get_novelty_mask()`."""
num_triples = 7
base = torch.arange(num_triples)
mapped_triples = torch.stack([base, base, 3 * base], dim=-1)
query_ids = torch.randperm(num_triples).numpy()[:num_triples // 2]
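        # Note (added for exposition): mapped_triples have the form (i, i, 3*i);
        # with head=0 and relation=0 fixed below, the only known tail is 0, so
        # every non-zero query id must be reported as novel.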
exp_novel = query_ids != 0
col = 2
other_col_ids = numpy.asarray([0, 0])
mask = get_novelty_mask(
mapped_triples=mapped_triples,
query_ids=query_ids,
col=col,
other_col_ids=other_col_ids,
)
assert mask.shape == query_ids.shape
assert (mask == exp_novel).all()
def test_extend_batch(self):
"""Test `_extend_batch()`."""
batch = torch.tensor([[a, b] for a in range(3) for b in range(4)]).view(-1, 2)
all_ids = [2 * i for i in range(5)]
batch_size = batch.shape[0]
num_choices = len(all_ids)
for dim in range(3):
h_ext_batch = extend_batch(batch=batch, all_ids=all_ids, dim=dim)
# check shape
assert h_ext_batch.shape == (batch_size * num_choices, 3)
# check content
actual_content = set(tuple(map(int, hrt)) for hrt in h_ext_batch)
exp_content = set()
for i in all_ids:
for b in batch:
c = list(map(int, b))
c.insert(dim, i)
exp_content.add(tuple(c))
assert actual_content == exp_content
|
the-stack_0_15132 | import grpc
import hello_pb2
import hello_pb2_grpc
def run():
# connect grpc server
channel = grpc.insecure_channel('localhost:8089')
# send grpc
stub = hello_pb2_grpc.GreeterStub(channel)
    response = stub.SayHello(hello_pb2.Request(name='cpx'))
print("Greeter client received: " + response.message)
if __name__ == '__main__':
run() |
the-stack_0_15134 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from datetime import datetime, timedelta
from sentry.constants import STATUS_RESOLVED, STATUS_UNRESOLVED
from sentry.models import GroupBookmark, GroupTagValue
from sentry.search.django.backend import DjangoSearchBackend
from sentry.testutils import TestCase
class DjangoSearchBackendTest(TestCase):
def create_backend(self):
return DjangoSearchBackend()
def setUp(self):
self.backend = self.create_backend()
self.project1 = self.create_project(name='foo')
self.project2 = self.create_project(name='bar')
self.group1 = self.create_group(
project=self.project1,
checksum='a' * 32,
message='foo',
times_seen=5,
status=STATUS_UNRESOLVED,
last_seen=datetime(2013, 8, 13, 3, 8, 24, 880386),
first_seen=datetime(2013, 7, 13, 3, 8, 24, 880386),
)
self.event1 = self.create_event(
event_id='a' * 32,
group=self.group1,
tags={
'server': 'example.com',
'env': 'production',
}
)
self.group2 = self.create_group(
project=self.project1,
checksum='b' * 32,
message='bar',
times_seen=10,
status=STATUS_RESOLVED,
last_seen=datetime(2013, 7, 14, 3, 8, 24, 880386),
first_seen=datetime(2013, 7, 14, 3, 8, 24, 880386),
)
self.event2 = self.create_event(
event_id='b' * 32,
group=self.group2,
tags={
'server': 'example.com',
'env': 'staging',
'url': 'http://example.com',
}
)
for key, value in self.event1.data['tags']:
GroupTagValue.objects.create(
group=self.group1,
key=key,
value=value,
)
for key, value in self.event2.data['tags']:
GroupTagValue.objects.create(
group=self.group2,
key=key,
value=value,
)
GroupBookmark.objects.create(
user=self.user,
group=self.group2,
project=self.group2.project,
)
self.backend.index(self.event1)
self.backend.index(self.event2)
def test_query(self):
backend = self.create_backend()
results = self.backend.query(self.project1, query='foo')
assert len(results) == 1
assert results[0] == self.group1
results = self.backend.query(self.project1, query='bar')
assert len(results) == 1
assert results[0] == self.group2
def test_sort(self):
backend = self.create_backend()
results = self.backend.query(self.project1, sort_by='date')
assert len(results) == 2
assert results[0] == self.group1
assert results[1] == self.group2
results = self.backend.query(self.project1, sort_by='new')
assert len(results) == 2
assert results[0] == self.group2
assert results[1] == self.group1
results = self.backend.query(self.project1, sort_by='freq')
assert len(results) == 2
assert results[0] == self.group2
assert results[1] == self.group1
def test_status(self):
results = self.backend.query(self.project1, status=STATUS_UNRESOLVED)
assert len(results) == 1
assert results[0] == self.group1
results = self.backend.query(self.project1, status=STATUS_RESOLVED)
assert len(results) == 1
assert results[0] == self.group2
def test_tags(self):
results = self.backend.query(self.project1, tags={'env': 'staging'})
assert len(results) == 1
assert results[0] == self.group2
results = self.backend.query(self.project1, tags={'env': 'example.com'})
assert len(results) == 0
def test_bookmarked_by(self):
results = self.backend.query(self.project1, bookmarked_by=self.user)
assert len(results) == 1
assert results[0] == self.group2
def test_project(self):
results = self.backend.query(self.project2)
assert len(results) == 0
def test_limit_and_offset(self):
results = self.backend.query(self.project1, limit=1)
assert len(results) == 1
results = self.backend.query(self.project1, offset=1, limit=1)
assert len(results) == 1
results = self.backend.query(self.project1, offset=2, limit=1)
assert len(results) == 0
def test_first_seen_date_filter(self):
backend = self.create_backend()
results = self.backend.query(
self.project1, date_from=self.group2.first_seen,
date_filter='first_seen')
assert len(results) == 1
assert results[0] == self.group2
results = self.backend.query(
self.project1, date_to=self.group1.first_seen + timedelta(minutes=1),
date_filter='first_seen')
assert len(results) == 1
assert results[0] == self.group1
results = self.backend.query(
self.project1,
date_from=self.group1.first_seen,
date_to=self.group1.first_seen + timedelta(minutes=1),
date_filter='first_seen',
)
assert len(results) == 1
assert results[0] == self.group1
def test_last_seen_date_filter(self):
backend = self.create_backend()
results = self.backend.query(
self.project1, date_from=self.group1.last_seen,
date_filter='last_seen')
assert len(results) == 1
assert results[0] == self.group1
results = self.backend.query(
self.project1,
date_to=self.group1.last_seen - timedelta(minutes=1),
date_filter='last_seen')
assert len(results) == 1
assert results[0] == self.group2
results = self.backend.query(
self.project1,
date_from=self.group2.last_seen,
date_to=self.group1.last_seen - timedelta(minutes=1),
date_filter='last_seen',
)
assert len(results) == 1
assert results[0] == self.group2
|
the-stack_0_15135 | #!/usr/bin/env python
"""Unit test for the linux cmd parser."""
import os
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.parsers import linux_cmd_parser
class LinuxCmdParserTest(test_lib.GRRBaseTest):
"""Test parsing of linux command output."""
def testDpkgCmdParser(self):
"""Ensure we can extract packages from dpkg output."""
parser = linux_cmd_parser.DpkgCmdParser()
content = open(os.path.join(self.base_path, "dpkg.out")).read()
out = list(parser.Parse("/usr/bin/dpkg", ["--list"], content, "", 0, 5,
None))
self.assertEqual(len(out), 181)
self.assertTrue(isinstance(out[1], rdfvalue.SoftwarePackage))
    self.assertEqual(out[0].name, "acpi-support-base")
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
|
the-stack_0_15139 | from logging import getLogger
from typing import Optional
from state_manager.models.state_managers.base import back_to_pre_state_, BaseStateManager
from state_manager.models.state import StateData
from state_manager.types.aiogram import aiogram_context
from state_manager.types.generals import Data
logger = getLogger(__name__)
class AiogramStateManager(BaseStateManager):
context: aiogram_context
async def set_next_state(self, state_name: str, *, data: Data = None) -> None:
logger.debug(f"set_next_state, state_name={state_name}, data={data}")
state_data = StateData(current_state=state_name, data=data)
await self.storage.put(self.context.from_user.id, state_data)
async def back_to_pre_state(self, *, data: Data = None) -> None:
logger.debug(f"back_to_pre_state, data={data}")
await back_to_pre_state_(self.storage, self.context.from_user.id, data)
async def _get_state_data(self) -> Optional[StateData]:
logger.debug(f"get_storage")
return await self.storage.get(self.context.from_user.id)
class Config:
arbitrary_types_allowed = True
|
the-stack_0_15140 | # -*- coding:utf-8 -*-
#
# Copyright (C) 2020-2021, Saarland University
# Copyright (C) 2020-2021, Maximilian Köhl <[email protected]>
# Copyright (C) 2020-2021, Michaela Klauck <[email protected]>
from __future__ import annotations
import dataclasses as d
import typing as t
import enum
from . import errors, expressions, operators, types
if t.TYPE_CHECKING:
from . import context
@d.dataclass(frozen=True)
class Aggregate(expressions.Expression):
"""
Applies an aggregation function over a set of states.
Attributes
----------
function:
The aggregation function to apply.
values:
The values to aggregate over.
    predicate:
The predicate used to identify the states to aggregate over.
"""
function: operators.AggregationFunction
values: expressions.Expression
predicate: expressions.Expression
def infer_type(self, scope: context.Scope) -> types.Type:
predicate_type = self.predicate.infer_type(scope)
if not predicate_type == types.BOOL:
raise errors.InvalidTypeError(
f"expected types.BOOL but got {predicate_type}"
)
values_type = self.values.infer_type(scope)
if values_type not in self.function.allowed_values_type:
raise errors.InvalidTypeError(
f"invalid type {values_type} of values in filter function"
)
return self.function.infer_result_type(values_type)
@property
def children(self) -> t.Sequence[expressions.Expression]:
return (self.predicate,)
class StatePredicate(enum.Enum):
"""
An enum of state predicates to be used with :class:`Aggregate`.
"""
INITIAL = "initial"
""" The state is an initial state. """
DEADLOCK = "deadlock"
""" The state is a deadlock state. """
TIMELOCK = "timelock"
""" The state is a timelock state. """
@d.dataclass(frozen=True)
class StateSelector(expressions.Expression):
"""
State selector expression using :class:`StatePredicate`.
Attributes
----------
predicate:
A :class:`StatePredicate`.
"""
predicate: StatePredicate
def infer_type(self, scope: context.Scope) -> types.Type:
return types.BOOL
@property
def children(self) -> t.Sequence[expressions.Expression]:
return ()
INITIAL_STATES = StateSelector(StatePredicate.INITIAL)
DEADLOCK_STATES = StateSelector(StatePredicate.DEADLOCK)
TIMELOCK_STATES = StateSelector(StatePredicate.TIMELOCK)
@d.dataclass(frozen=True)
class Probability(expressions.Expression):
"""
Probability property.
Attributes
----------
operator:
*Min* or *max* probability (:class:`~momba.model.operators.MinMax`).
formula:
Boolean expression to compute the probability for.
"""
operator: operators.MinMax
formula: expressions.Expression
def infer_type(self, scope: context.Scope) -> types.Type:
formula_type = self.formula.infer_type(scope)
if not formula_type == types.BOOL:
raise errors.InvalidTypeError(f"expected types.BOOL but got {formula_type}")
return types.REAL
@property
def children(self) -> t.Sequence[expressions.Expression]:
return (self.formula,)
@d.dataclass(frozen=True)
class PathQuantifier(expressions.Expression):
"""
A temporal path quantifier property.
Attributes
----------
quantifier:
The quantifier (:class:`~momba.model.operators.Quantifier`).
formula:
The inner formula.
"""
quantifier: operators.Quantifier
formula: expressions.Expression
def infer_type(self, scope: context.Scope) -> types.Type:
formula_type = self.formula.infer_type(scope)
if not formula_type == types.BOOL:
raise errors.InvalidTypeError(f"expected types.BOOL but got {formula_type}")
return types.BOOL
@property
def children(self) -> t.Sequence[expressions.Expression]:
return (self.formula,)
class AccumulationInstant(enum.Enum):
"""
    An enumeration of reward accumulation instants.
"""
STEPS = "steps"
""" Accumulate at each step. """
TIME = "time"
""" Accumulate with the progression of time. """
EXIT = "exit"
""" Accumulate after exiting a state. """
@d.dataclass(frozen=True)
class ExpectedReward(expressions.Expression):
"""
Expected reward property.
Attributes
----------
operator:
*Min* or *max* probability (:class:`~momba.model.operators.MinMax`).
reward:
Expression to compute the reward.
accumulate:
A set of accumulation instants.
reachability:
step_instant:
time_instant:
reward_instants:
"""
operator: operators.MinMax
reward: expressions.Expression
accumulate: t.Optional[t.FrozenSet[AccumulationInstant]] = None
reachability: t.Optional[expressions.Expression] = None
step_instant: t.Optional[expressions.Expression] = None
time_instant: t.Optional[expressions.Expression] = None
reward_instants: t.Optional[t.Sequence[RewardInstant]] = None
def infer_type(self, scope: context.Scope) -> types.Type:
# TODO: check the types of the provided arguments
return types.REAL
@property
def children(self) -> t.Sequence[expressions.Expression]:
children: t.List[expressions.Expression] = []
if self.reachability is not None:
children.append(self.reachability)
if self.step_instant is not None:
children.append(self.step_instant)
if self.time_instant is not None:
children.append(self.time_instant)
if self.reward_instants is not None:
for reward_instant in self.reward_instants:
children.extend(reward_instant.children)
return children
@d.dataclass(frozen=True)
class RewardInstant:
"""
A reward instant.
Attributes
----------
expression:
accumulate:
instant:
"""
expression: expressions.Expression
accumulate: t.FrozenSet[AccumulationInstant]
instant: expressions.Expression
@property
def children(self) -> t.Sequence[expressions.Expression]:
return (self.expression, self.instant)
@d.dataclass(frozen=True)
class SteadyState(expressions.Expression):
"""
A *steady-state* property.
Attributes
----------
operator:
formula:
accumulate:
"""
operator: operators.MinMax
formula: expressions.Expression
accumulate: t.Optional[t.FrozenSet[AccumulationInstant]] = None
def infer_type(self, scope: context.Scope) -> types.Type:
# TODO: check the types of the provided arguments
return types.REAL
@property
def children(self) -> t.Sequence[expressions.Expression]:
return (self.formula,)
@d.dataclass(frozen=True)
class BinaryPathFormula(expressions.Expression):
"""
A temporal binary path formula.
Attributes
----------
operator:
left:
right:
step_bounds:
time_bounds:
reward_bounds:
"""
operator: operators.BinaryPathOperator
left: expressions.Expression
right: expressions.Expression
step_bounds: t.Optional[Interval] = None
time_bounds: t.Optional[Interval] = None
reward_bounds: t.Optional[t.Sequence[RewardBound]] = None
def infer_type(self, scope: context.Scope) -> types.Type:
left_type = self.left.infer_type(scope)
if left_type != types.BOOL:
raise errors.InvalidTypeError(f"expected types.BOOL but got {left_type}")
right_type = self.left.infer_type(scope)
if right_type != types.BOOL:
raise errors.InvalidTypeError(f"expected types.BOOL but got {right_type}")
# TODO: check the types of the other arguments
return types.BOOL
@property
def children(self) -> t.Sequence[expressions.Expression]:
children: t.List[expressions.Expression] = [self.left, self.right]
if self.step_bounds is not None:
children.extend(self.step_bounds.expressions)
if self.time_bounds is not None:
children.extend(self.time_bounds.expressions)
if self.reward_bounds is not None:
for reward_bound in self.reward_bounds:
children.extend(reward_bound.expressions)
return children
@d.dataclass(frozen=True)
class UnaryPathFormula(expressions.Expression):
"""
A temporal unary path formula.
Attributes
----------
operator:
formula:
step_bounds:
time_bounds:
reward_bounds:
"""
operator: operators.UnaryPathOperator
formula: expressions.Expression
step_bounds: t.Optional[Interval] = None
time_bounds: t.Optional[Interval] = None
reward_bounds: t.Optional[t.Sequence[RewardBound]] = None
def infer_type(self, scope: context.Scope) -> types.Type:
formula_type = self.formula.infer_type(scope)
if formula_type != types.BOOL:
raise errors.InvalidTypeError(f"expected types.BOOL but got {formula_type}")
# TODO: check the types of the other arguments
return types.BOOL
@property
def children(self) -> t.Sequence[expressions.Expression]:
children: t.List[expressions.Expression] = [self.formula]
if self.step_bounds is not None:
children.extend(self.step_bounds.expressions)
if self.time_bounds is not None:
children.extend(self.time_bounds.expressions)
if self.reward_bounds is not None:
for reward_bound in self.reward_bounds:
children.extend(reward_bound.expressions)
return children
@d.dataclass(frozen=True)
class Interval:
"""
An interval.
Attributes
----------
lower:
The lower bound of the interval or :code:`None`.
upper:
The upper bound of the interval or :code:`None`.
lower_exclusive:
Whether the lower bound is exclusive.
upper_exclusive:
Whether the upper bound is exclusive.
"""
lower: t.Optional[expressions.Expression] = None
upper: t.Optional[expressions.Expression] = None
lower_exclusive: t.Optional[expressions.Expression] = None
upper_exclusive: t.Optional[expressions.Expression] = None
@property
def expressions(self) -> t.Sequence[expressions.Expression]:
return [
expr
for expr in [
self.lower,
self.upper,
self.lower_exclusive,
self.upper_exclusive,
]
if expr is not None
]
@d.dataclass(frozen=True)
class RewardBound:
"""
A *reward bound*.
Attributes
----------
expression:
accumulate:
bounds:
"""
expression: expressions.Expression
accumulate: t.FrozenSet[AccumulationInstant]
bounds: Interval
@property
def expressions(self) -> t.Sequence[expressions.Expression]:
expressions = [self.expression]
expressions.extend(self.bounds.expressions)
return expressions
def aggregate(
function: operators.AggregationFunction,
values: expressions.Expression,
states: expressions.Expression = INITIAL_STATES,
) -> expressions.Expression:
"""
Creates an :class:`Aggregate` property.
"""
return Aggregate(function, values, states)
def min_prob(formula: expressions.Expression) -> expressions.Expression:
"""
Constructs a :math:`P_\\mathit{min}` property.
"""
return Probability(operators.MinMax.MIN, formula)
def max_prob(formula: expressions.Expression) -> expressions.Expression:
"""
Constructs a :math:`P_\\mathit{max}` property.
"""
return Probability(operators.MinMax.MAX, formula)
def forall_paths(formula: expressions.Expression) -> expressions.Expression:
"""
CTL :math:`\\forall` path operator.
"""
return PathQuantifier(operators.Quantifier.FORALL, formula)
def exists_path(formula: expressions.Expression) -> expressions.Expression:
"""
    CTL :math:`\\exists` path operator.
"""
return PathQuantifier(operators.Quantifier.EXISTS, formula)
def min_expected_reward(
reward: expressions.Expression,
*,
accumulate: t.Optional[t.AbstractSet[AccumulationInstant]] = None,
reachability: t.Optional[expressions.Expression] = None,
step_instant: t.Optional[expressions.Expression] = None,
time_instant: t.Optional[expressions.Expression] = None,
reward_instants: t.Optional[t.Sequence[RewardInstant]] = None,
) -> expressions.Expression:
"""
Constructs a :math:`E_\\mathit{min}` property.
"""
return ExpectedReward(
operators.MinMax.MIN,
reward,
accumulate=None if accumulate is None else frozenset(accumulate),
reachability=reachability,
step_instant=step_instant,
time_instant=time_instant,
reward_instants=reward_instants,
)
def max_expected_reward(
reward: expressions.Expression,
*,
accumulate: t.Optional[t.AbstractSet[AccumulationInstant]] = None,
reachability: t.Optional[expressions.Expression] = None,
step_instant: t.Optional[expressions.Expression] = None,
time_instant: t.Optional[expressions.Expression] = None,
reward_instants: t.Optional[t.Sequence[RewardInstant]] = None,
) -> expressions.Expression:
"""
Constructs a :math:`E_\\mathit{max}` property.
"""
return ExpectedReward(
operators.MinMax.MAX,
reward,
accumulate=None if accumulate is None else frozenset(accumulate),
reachability=reachability,
step_instant=step_instant,
time_instant=time_instant,
reward_instants=reward_instants,
)
def min_steady_state(
formula: expressions.Expression,
*,
accumulate: t.Optional[t.AbstractSet[AccumulationInstant]] = None,
) -> expressions.Expression:
"""
Constructs a :math:`S_\\mathit{min}` property.
"""
return SteadyState(
operators.MinMax.MIN,
formula,
accumulate=None if accumulate is None else frozenset(accumulate),
)
def max_steady_state(
formula: expressions.Expression,
*,
accumulate: t.Optional[t.AbstractSet[AccumulationInstant]] = None,
) -> expressions.Expression:
"""
Constructs a :math:`S_\\mathit{max}` property.
"""
return SteadyState(
operators.MinMax.MAX,
formula,
accumulate=None if accumulate is None else frozenset(accumulate),
)
def until(
left: expressions.Expression,
right: expressions.Expression,
*,
step_bounds: t.Optional[Interval] = None,
time_bounds: t.Optional[Interval] = None,
reward_bounds: t.Optional[t.Sequence[RewardBound]] = None,
) -> expressions.Expression:
"""
Constructs a temporal *until* property.
"""
return BinaryPathFormula(
operators.BinaryPathOperator.UNTIL,
left,
right,
step_bounds=step_bounds,
time_bounds=time_bounds,
reward_bounds=reward_bounds,
)
def weak_until(
left: expressions.Expression,
right: expressions.Expression,
*,
step_bounds: t.Optional[Interval] = None,
time_bounds: t.Optional[Interval] = None,
reward_bounds: t.Optional[t.Sequence[RewardBound]] = None,
) -> expressions.Expression:
"""
Constructs a temporal *weak-until* property.
"""
return BinaryPathFormula(
operators.BinaryPathOperator.WEAK_UNTIL,
left,
right,
step_bounds=step_bounds,
time_bounds=time_bounds,
reward_bounds=reward_bounds,
)
def release(
left: expressions.Expression,
right: expressions.Expression,
*,
step_bounds: t.Optional[Interval] = None,
time_bounds: t.Optional[Interval] = None,
reward_bounds: t.Optional[t.Sequence[RewardBound]] = None,
) -> expressions.Expression:
"""
Constructs a temporal *release* property.
"""
return BinaryPathFormula(
operators.BinaryPathOperator.RELEASE,
left,
right,
step_bounds=step_bounds,
time_bounds=time_bounds,
reward_bounds=reward_bounds,
)
def eventually(
formula: expressions.Expression,
*,
step_bounds: t.Optional[Interval] = None,
time_bounds: t.Optional[Interval] = None,
reward_bounds: t.Optional[t.Sequence[RewardBound]] = None,
) -> expressions.Expression:
"""
    Constructs a temporal *eventually* property.
"""
return UnaryPathFormula(
operators.UnaryPathOperator.EVENTUALLY,
formula,
step_bounds=step_bounds,
time_bounds=time_bounds,
reward_bounds=reward_bounds,
)
def globally(
formula: expressions.Expression,
*,
step_bounds: t.Optional[Interval] = None,
time_bounds: t.Optional[Interval] = None,
reward_bounds: t.Optional[t.Sequence[RewardBound]] = None,
) -> expressions.Expression:
"""
Constructs a temporal *globally* property.
"""
return UnaryPathFormula(
operators.UnaryPathOperator.GLOBALLY,
formula,
step_bounds=step_bounds,
time_bounds=time_bounds,
reward_bounds=reward_bounds,
)
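# --- Illustrative sketch (added for exposition; not part of the original module) ---
# Composing the constructors above using only names defined in this module: the
# minimal probability of eventually reaching a deadlock state. The helper name is
# ours and purely illustrative.
def _example_min_reachability_of_deadlock() -> expressions.Expression:
    return min_prob(eventually(DEADLOCK_STATES))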
|
the-stack_0_15141 | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class TestSpatialPyramidPooling2D(unittest.TestCase):
pyramid_height = 3
output_dim = 63 # channels(c=3) * (1 + 4 + 16) = 63
n, c, h, w = 2, 3, 9, 8
pooling_class = functions.MaxPooling2D
def setUp(self):
        # Avoid instability of the numerical gradient
self.x = numpy.random.randn(
self.n, self.c, self.h, self.w).astype(numpy.float32)
self.one = numpy.ones(
(self.n, self.c, self.h, self.w)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, (self.n, self.output_dim, 1, 1))
self.gy = self.gy.astype(numpy.float32)
def check_forward(self, x_data, use_cudnn=True):
x = chainer.Variable(x_data)
y = functions.spatial_pyramid_pooling_2d(
x, self.pyramid_height, self.pooling_class,
use_cudnn=use_cudnn)
self.assertEqual(y.data.dtype, numpy.float32)
y_data = cuda.to_cpu(y.data)
self.assertEqual(self.gy.shape, y_data.shape)
def check_forward_ones(self, x_data, use_cudnn=True):
x = chainer.Variable(x_data)
y = functions.spatial_pyramid_pooling_2d(
x, self.pyramid_height, self.pooling_class, use_cudnn=use_cudnn)
y_data = cuda.to_cpu(y.data)
self.assertEqual((self.n, self.output_dim, 1, 1), y_data.shape)
gradient_check.assert_allclose(y_data, numpy.ones_like(y_data))
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
self.check_forward_ones(self.one)
@attr.cudnn
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
self.check_forward_ones(cuda.to_gpu(self.one))
@attr.gpu
@condition.retry(3)
def test_forward_gpu_no_cudnn(self):
self.check_forward(cuda.to_gpu(self.x), False)
self.check_forward_ones(cuda.to_gpu(self.one), False)
def check_backward(self, x_data, y_grad, use_cudnn=True):
x = chainer.Variable(x_data)
y = functions.spatial_pyramid_pooling_2d(
x, self.pyramid_height, self.pooling_class, use_cudnn=use_cudnn)
y.grad = y_grad
y.backward()
func = y.creator
f = lambda: func.forward((x.data,))
gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))
gradient_check.assert_allclose(
cuda.to_cpu(gx),
cuda.to_cpu(x.grad),
atol=1e-04)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_backward_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), False)
testing.run_module(__name__, __file__)
|
the-stack_0_15142 | from conans import ConanFile, tools, CMake
import os
class SDL2ImageConan(ConanFile):
name = "sdl2_image"
description = "SDL_image is an image file loading library"
topics = ("sdl2_image", "sdl_image", "sdl2", "sdl", "images", "opengl")
url = "https://github.com/bincrafters/community"
homepage = "https://www.libsdl.org/projects/SDL_image/"
license = "MIT"
exports_sources = ["CMakeLists.txt"]
generators = ["cmake", "cmake_find_package_multi"]
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"bmp": [True, False],
"gif": [True, False],
"lbm": [True, False],
"pcx": [True, False],
"pnm": [True, False],
"svg": [True, False],
"tga": [True, False],
"xcf": [True, False],
"xpm": [True, False],
"xv": [True, False],
"jpg": [True, False],
"tif": [True, False],
"png": [True, False],
"webp": [True, False],
"imageio": [True, False]}
default_options = {
"shared": False,
"fPIC": True,
"bmp": True,
"gif": True,
"lbm": True,
"pcx": True,
"pnm": True,
"svg": True,
"tga": True,
"xcf": True,
"xpm": True,
"xv": True,
"jpg": True,
"tif": True,
"png": True,
"webp": True,
"imageio": False
}
_cmake = None
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
def config_options(self):
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
if self.settings.os == "Windows":
del self.options.fPIC
if self.settings.os != "Macos":
del self.options.imageio
def requirements(self):
self.requires("sdl2/2.0.14@bincrafters/stable")
if self.options.tif:
self.requires("libtiff/4.0.9")
if self.options.jpg:
self.requires("libjpeg/9d")
if self.options.png:
self.requires("libpng/1.6.37")
if self.options.webp:
self.requires("libwebp/1.0.3")
self.requires("zlib/1.2.11")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = "SDL2_image-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["BMP"] = self.options.bmp
self._cmake.definitions["GIF"] = self.options.gif
self._cmake.definitions["IMAGEIO"] = self.options.get_safe("imageio")
self._cmake.definitions["JPG"] = self.options.jpg
self._cmake.definitions["LBM"] = self.options.lbm
self._cmake.definitions["PCX"] = self.options.pcx
self._cmake.definitions["PNG"] = self.options.png
self._cmake.definitions["PNM"] = self.options.pnm
self._cmake.definitions["SVG"] = self.options.svg
self._cmake.definitions["TGA"] = self.options.tga
self._cmake.definitions["TIF"] = self.options.tif
self._cmake.definitions["WEBP"] = self.options.webp
self._cmake.definitions["XCF"] = self.options.xcf
self._cmake.definitions["XPM"] = self.options.xpm
self._cmake.definitions["XV"] = self.options.xv
# TODO: https://github.com/bincrafters/community/pull/1317#pullrequestreview-584847138
self._cmake.definitions["TIF_DYNAMIC"] = self.options["libtiff"].shared if self.options.tif else False
self._cmake.definitions["JPG_DYNAMIC"] = self.options["libjpeg"].shared if self.options.jpg else False
self._cmake.definitions["PNG_DYNAMIC"] = self.options["libpng"].shared if self.options.png else False
self._cmake.definitions["WEBP_DYNAMIC"] = self.options["libwebp"].shared if self.options.webp else False
self._cmake.definitions["SDL_IS_SHARED"] = self.options["sdl2"].shared
self._cmake.configure(build_dir="build")
return self._cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy(pattern="COPYING.txt", dst="license", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
self.cpp_info.libs = ["SDL2_image"]
self.cpp_info.includedirs.append(os.path.join("include", "SDL2"))
# TODO: Add components in a sane way. SDL2_image might be incorrect, as the current dev version uses SDL2::image
# The current dev version is the first version with official CMake support
self.cpp_info.names["cmake_find_package"] = "SDL2_image"
self.cpp_info.names["cmake_find_package_multi"] = "SDL2_image"
|
the-stack_0_15143 | """LaTeX Exporter class"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
from traitlets import Unicode, default
from traitlets.config import Config
from nbconvert.filters.highlight import Highlight2Latex
from nbconvert.filters.filter_links import resolve_references
from .templateexporter import TemplateExporter
class LatexExporter(TemplateExporter):
"""
    Exports to a LaTeX template. Inherit from this class if your template is
    LaTeX based and you need custom transformers/filters. Inherit from it if
    you are writing your own LaTeX template and need custom transformers/filters.
    If you don't need custom transformers/filters, just change the
    'template_file' config option. Place your template in the special "/latex"
    subfolder of the "../templates" folder.
"""
export_from_notebook = "latex"
@default('file_extension')
def _file_extension_default(self):
return '.tex'
@default('template_file')
def _template_file_default(self):
return 'article.tplx'
# Latex constants
@default('default_template_path')
def _default_template_path_default(self):
return os.path.join("..", "templates", "latex")
@default('template_skeleton_path')
def _template_skeleton_path_default(self):
return os.path.join("..", "templates", "latex", "skeleton")
#Extension that the template files use.
template_extension = Unicode(".tplx").tag(config=True)
output_mimetype = 'text/latex'
def default_filters(self):
for x in super(LatexExporter, self).default_filters():
yield x
yield ('resolve_references', resolve_references)
@property
def default_config(self):
c = Config({
'NbConvertBase': {
'display_data_priority' : ['text/latex', 'application/pdf', 'image/png', 'image/jpeg', 'image/svg+xml', 'text/markdown', 'text/plain']
},
'ExtractOutputPreprocessor': {
'enabled':True
},
'SVG2PDFPreprocessor': {
'enabled':True
},
'LatexPreprocessor': {
'enabled':True
},
'SphinxPreprocessor': {
'enabled':True
},
'HighlightMagicsPreprocessor': {
'enabled':True
}
})
c.merge(super(LatexExporter,self).default_config)
return c
def from_notebook_node(self, nb, resources=None, **kw):
langinfo = nb.metadata.get('language_info', {})
lexer = langinfo.get('pygments_lexer', langinfo.get('name', None))
self.register_filter('highlight_code',
Highlight2Latex(pygments_lexer=lexer, parent=self))
return super(LatexExporter, self).from_notebook_node(nb, resources, **kw)
def _create_environment(self):
environment = super(LatexExporter, self)._create_environment()
# Set special Jinja2 syntax that will not conflict with latex.
environment.block_start_string = "((*"
environment.block_end_string = "*))"
environment.variable_start_string = "((("
environment.variable_end_string = ")))"
environment.comment_start_string = "((="
environment.comment_end_string = "=))"
return environment
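# Illustrative (hypothetical) template snippet using the LaTeX-safe Jinja2
# delimiters configured above; it is shown only as a comment and is not part of
# this exporter:
#
#   ((* block docclass *))\documentclass{article}((* endblock docclass *))
#   ((= a comment that Jinja2 strips from the rendered .tex =))
#   ((( nb.metadata.get('title', '') )))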
|
the-stack_0_15145 | """
Solves problem 7 of the One Hundred Dollars, One Hundred Digits Challenge.
"""
import numpy as np
from pysparse.sparse import spmatrix
from pysparse.itsolvers.krylov import minres
from pysparse.precon import precon
def get_primes(nofPrimes):
    # Simple trial division: a candidate i is prime if no previously found
    # prime p with p*p <= i divides it.
    primes = np.zeros(nofPrimes, 'i')
    primes[0] = 2
    nof = 1
    i = 3
    while True:
        for p in primes[:nof]:
            # Stop as soon as p divides i (composite) or p*p > i (prime).
            if i%p == 0 or p*p > i: break
        if i%p != 0:
            primes[nof] = i
            nof += 1
            if nof >= nofPrimes:
                break
        i = i+2
    return primes
n = 20000
print('Generating first %d primes...' % n)
primes = get_primes(n)
print('Assembling coefficient matrix...')
A = spmatrix.ll_mat_sym(n, n*8)
d = 1
while d < n:
for i in range(d, n):
A[i,i-d] = 1.0
d *= 2
for i in range(n):
A[i,i] = 1.0 * primes[i]
A = A.to_sss()
K = precon.ssor(A)
print('Solving linear system...')
b = np.zeros(n); b[0] = 1.0
x = np.empty(n)
info, iter, relres = minres(A, b, x, 1e-16, n, K)
print(info, iter, relres)
print('%.16e' % x[0])
|
the-stack_0_15147 | import numpy as np
import scipy.interpolate as si
import tools
def stdextr(data, x1, x2, variance=None, mask=None, interp=False):
"""
Standard box extraction of spectrum. Step 4 of Horne (1989).
Parameters:
-----------
data: 2D float ndarray
Sky-subtracted spectrum image of shape [nwavelength, nposition].
x1: Integer
Left X boundary of region to extract the spectrum.
x2: Integer
Right X boundary of region to extract the spectrum.
Note that: 0 <= x1 <= x2 <= nx
variance: 2D float ndarray
Variance image from processed image.
mask: 2D integer ndarray
Mask of the data image (1 = good pixel, 0 = bad pixel).
interp: Bool
    If True, linearly interpolate the data for bad pixels.
Returns:
--------
stdspec: 1D float ndarray
The extracted spectrum.
stdvar: 1D float ndarray
Variance of extracted spectrum.
Example:
--------
>>> import sys
>>> import astropy.io.fits as fits
>>> import matplotlib.pyplot as plt
>>> sys.path.append("./src/")
>>> import stdextr as se
>>> data = fits.getdata("./images/ex1.fits")
>>> spec, sstd = se.stdextr(data, 230, 270)
>>> plt.plot(spec)
"""
# Check inputs:
nwave, nx = np.shape(data)
if variance is None:
variance = np.ones((nwave, nx), np.double)
if mask is None:
mask = np.ones((nwave, nx), np.byte)
if x1 < 0 or x2 <= x1 or nx < x2:
tools.error("Invalid x1, x2 boundaries (={:d}, {:d}), the values must "
"satisfy:\n 0 <= x1 < x2 <= nx (={:d}).".format(x1, x2, nx))
if np.shape(variance) != (nwave, nx):
tools.error("Incompatible shapes between data image ({:d}, {:d}) and "
"variance image ({:d}, {:d}).".format(nwave, nx, *np.shape(variance)))
if np.shape(mask) != (nwave, nx):
tools.error("Incompatible shapes between data image ({:d}, {:d}) and "
"mask image ({:d}, {:d}).".format(nwave, nx, *np.shape(mask)))
# Interpolate over bad pixels:
if interp:
stdspec = np.zeros(nwave)
for i in np.arange(nwave):
bad = np.where(mask[i, x1:x2] == 0)
good = np.where(mask[i, x1:x2] == 1)
datav = np.copy(data[i, x1:x2])
      if len(bad[0]) != 0:
        # Interpolate the good-pixel values over the bad-pixel positions:
        interpol = si.interp1d(good[0], datav[good], kind="linear")
        datav[bad] = interpol(bad[0])
stdspec[i] = np.sum(datav)
return stdspec, np.zeros(nwave)
# Standard extraction:
stdspec = np.sum((data * mask)[:, x1:x2], axis=1)
stdvar = np.sum((variance * mask)[:, x1:x2], axis=1)
return stdspec, stdvar
|
the-stack_0_15149 | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcls import __version__
from mmcls.apis import init_random_seed, set_random_seed, train_model
from mmcls.datasets import build_dataset
from mmcls.models import build_classifier
from mmcls.utils import collect_env, get_root_logger, setup_multi_processes
def parse_args():
parser = argparse.ArgumentParser(description='Train a model')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--device', help='device used for training. (Deprecated)')
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file (deprecate), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
if args.device:
warnings.warn(
'--device is deprecated. To use cpu to train, please '
'refers to https://mmclassification.readthedocs.io/en/latest/'
'getting_started.html#train-a-model')
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set multi-process settings
setup_multi_processes(cfg)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
seed = init_random_seed(args.seed)
logger.info(f'Set random seed to {seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(seed, deterministic=args.deterministic)
cfg.seed = seed
meta['seed'] = seed
model = build_classifier(cfg.model)
model.init_weights()
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
# save mmcls version, config file content and class names in
# runner as meta data
meta.update(
dict(
mmcls_version=__version__,
config=cfg.pretty_text,
CLASSES=datasets[0].CLASSES))
# add an attribute for visualization convenience
train_model(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
device='cpu' if args.device == 'cpu' else 'cuda',
meta=meta)
if __name__ == '__main__':
main()
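# Example invocation (paths are placeholders; see parse_args() above for all options):
#   python train.py path/to/config.py --work-dir work_dirs/my_run --seed 0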
|
the-stack_0_15150 | import numpy as np
import logging
from pyrfr import regression
from smac.epm.base_epm import AbstractEPM
from smac.configspace import (
CategoricalHyperparameter,
UniformFloatHyperparameter,
UniformIntegerHyperparameter,
Constant,
)
class RandomForestWithInstances(AbstractEPM):
"""
    Base EPM. Very similar to SMAC3's EPM.
Interface to the random forest that takes instance features
into account.
Attributes
----------
rf_opts :
Random forest hyperparameter
n_points_per_tree : int
rf : regression.binary_rss_forest
Only available after training
hypers: list
List of random forest hyperparameters
seed : int
types : list
bounds : list
rng : np.random.RandomState
logger : logging.logger
"""
def __init__(self,
configspace,
types: np.ndarray,
bounds: np.ndarray,
seed: int,
num_trees: int = 10,
do_bootstrapping: bool = True,
n_points_per_tree: int = -1,
ratio_features: float = 5. / 6.,
min_samples_split: int = 3,
min_samples_leaf: int = 3,
max_depth: int = 20,
eps_purity: int = 1e-8,
max_num_nodes: int = 2 ** 20,
logged_y: bool = True,
**kwargs):
"""Constructor
Parameters
----------
configspace: ConfigurationSpace
configspace to be passed to random forest (used to impute inactive parameter-values)
types : np.ndarray (D)
Specifies the number of categorical values of an input dimension where
the i-th entry corresponds to the i-th input dimension. Let's say we
            have 2 dimensions where the first dimension consists of 3 different
            categorical choices and the second dimension is continuous, then we
            have to pass np.array([2, 0]). Note that we count starting from 0.
bounds : np.ndarray (D, 2)
Specifies the bounds for continuous features.
seed : int
The seed that is passed to the random_forest_run library.
num_trees : int
The number of trees in the random forest.
do_bootstrapping : bool
Turns on / off bootstrapping in the random forest.
n_points_per_tree : int
Number of points per tree. If <= 0 X.shape[0] will be used
in _train(X, y) instead
ratio_features : float
The ratio of features that are considered for splitting.
min_samples_split : int
The minimum number of data points to perform a split.
min_samples_leaf : int
The minimum number of data points in a leaf.
max_depth : int
The maximum depth of a single tree.
eps_purity : float
The minimum difference between two target values to be considered
different
max_num_nodes : int
The maxmimum total number of nodes in a tree
logged_y: bool
Indicates if the y data is transformed (i.e. put on logscale) or not
"""
super().__init__(configspace=configspace, types=types, bounds=bounds, seed=seed, **kwargs)
self.configspace = configspace
self.types = types
self.bounds = bounds
self.rng = regression.default_random_engine(seed)
self.rf_opts = regression.forest_opts()
self.rf_opts.num_trees = num_trees
self.rf_opts.do_bootstrapping = do_bootstrapping
max_features = 0 if ratio_features > 1.0 else \
max(1, int(types.shape[0] * ratio_features))
self.rf_opts.tree_opts.max_features = max_features
self.rf_opts.tree_opts.min_samples_to_split = min_samples_split
self.rf_opts.tree_opts.min_samples_in_leaf = min_samples_leaf
self.rf_opts.tree_opts.max_depth = max_depth
self.rf_opts.tree_opts.epsilon_purity = eps_purity
self.rf_opts.tree_opts.max_num_nodes = max_num_nodes
self.rf_opts.compute_law_of_total_variance = False # Always off. No need for this in our base EPM
self.n_points_per_tree = n_points_per_tree
self.rf = None # type: regression.binary_rss_forest
self.logged_y = logged_y
        # This list will be read out by save_iteration() in the solver
self.hypers = [num_trees, max_num_nodes, do_bootstrapping,
n_points_per_tree, ratio_features, min_samples_split,
min_samples_leaf, max_depth, eps_purity, seed]
self.seed = seed
self.impute_values = {}
self.logger = logging.getLogger(self.__module__ + "." +
self.__class__.__name__)
def _impute_inactive(self, X: np.ndarray) -> np.ndarray:
X = X.copy()
for idx, hp in enumerate(self.configspace.get_hyperparameters()):
if idx not in self.impute_values:
parents = self.configspace.get_parents_of(hp.name)
if len(parents) == 0:
self.impute_values[idx] = None
else:
if isinstance(hp, CategoricalHyperparameter):
self.impute_values[idx] = len(hp.choices)
elif isinstance(hp, (UniformFloatHyperparameter, UniformIntegerHyperparameter)):
self.impute_values[idx] = -1
elif isinstance(hp, Constant):
self.impute_values[idx] = 1
else:
raise ValueError
nonfinite_mask = ~np.isfinite(X[:, idx])
X[nonfinite_mask, idx] = self.impute_values[idx]
return X
def _train(self, X: np.ndarray, y: np.ndarray, **kwargs):
"""Trains the random forest on X and y.
Parameters
----------
X : np.ndarray [n_samples, n_features (config + instance features)]
Input data points.
Y : np.ndarray [n_samples, ]
The corresponding target values.
Returns
-------
self
"""
self.X = self._impute_inactive(X)
self.y = y.flatten()
if self.n_points_per_tree <= 0:
self.rf_opts.num_data_points_per_tree = self.X.shape[0]
else:
self.rf_opts.num_data_points_per_tree = self.n_points_per_tree
self.rf = regression.binary_rss_forest()
self.rf.options = self.rf_opts
data = self.__init_data_container(self.X, self.y)
self.rf.fit(data, rng=self.rng)
return self
def __init_data_container(self, X: np.ndarray, y: np.ndarray):
"""
        Biggest difference to SMAC3's EPM: we fit the forest on a transformation and predict the untransformed result.
        Fills a pyrfr default data container, s.t. the forest knows
        categoricals and bounds for continuous data
Parameters
----------
X : np.ndarray [n_samples, n_features]
Input data points
y : np.ndarray [n_samples, ]
Corresponding target values
Returns
-------
data : regression.default_data_container
The filled data container that pyrfr can interpret
"""
# retrieve the types and the bounds from the ConfigSpace
data = regression.default_data_container(X.shape[1])
if self.logged_y:
y = y.reshape((-1, 1))
y = np.hstack((y, np.power(10, y)))
for i, (mn, mx) in enumerate(self.bounds):
if np.isnan(mx):
data.set_type_of_feature(i, mn)
else:
data.set_bounds_of_feature(i, mn, mx)
for row_X, row_y in zip(X, y):
data.add_data_point(row_X, row_y)
return data
def _predict(self, X: np.ndarray, cov_return_type='diagonal_cov'):
"""Predict means and variances for given X.
Parameters
----------
X : np.ndarray of shape = [n_samples,
n_features (config + instance features)]
Returns
-------
means : np.ndarray of shape = [n_samples, 1]
Predictive mean
vars : np.ndarray of shape = [n_samples, 1]
Predictive variance
"""
if len(X.shape) != 2:
raise ValueError(
'Expected 2d array, got %dd array!' % len(X.shape))
if X.shape[1] != len(self._initial_types):
raise ValueError('Rows in X should have %d entries but have %d!' %
(len(self._initial_types), X.shape[1]))
if cov_return_type != 'diagonal_cov':
raise ValueError("'cov_return_type' can only take 'diagonal_cov' for this model")
means, vars_ = [], []
X = self._impute_inactive(X)
for row_X in X:
mean, var = self.rf.predict_mean_var(row_X)
means.append(mean)
vars_.append(var)
means = np.array(means)
vars_ = np.array(vars_)
return means.reshape((-1, 1)), vars_.reshape((-1, 1))
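# A minimal usage sketch (illustrative only; the ConfigurationSpace construction,
# the array shapes and the public train()/predict() wrappers inherited from
# smac's AbstractEPM are assumptions, not defined in this file):
#
#     import numpy as np
#     import ConfigSpace as CS
#     cs = CS.ConfigurationSpace()
#     cs.add_hyperparameter(CS.UniformFloatHyperparameter("x", 0.0, 1.0))
#     types = np.array([0])                 # one continuous input dimension
#     bounds = np.array([[0.0, 1.0]])
#     model = RandomForestWithInstances(cs, types, bounds, seed=1)
#     model.train(X, y)                     # X: [n_samples, 1], y: [n_samples, 1]
#     means, variances = model.predict(X)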
|
the-stack_0_15152 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('training', '0003_training_published_mecc_url'),
]
operations = [
migrations.AddField(
model_name='training',
name='is_existing_rof',
field=models.BooleanField(verbose_name="Témoin d'existence dans ROF", default=True),
),
migrations.AddField(
model_name='training',
name='recup_atb_ens',
field=models.BooleanField(verbose_name='Témoin de récupération des responsables, coef. et notes seuil', default=False),
),
]
|
the-stack_0_15154 | """Computes the similarity of molecular scaffolds between two datasets."""
from itertools import product
import math
import os
import sys
from typing import List
from typing_extensions import Literal
import numpy as np
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
from tqdm import tqdm
from tap import Tap # pip install typed-argument-parser (https://github.com/swansonk14/typed-argument-parser)
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from chemprop.data import get_data, scaffold_to_smiles
class Args(Tap):
data_path_1: str # Path to first data CSV file
data_path_2: str # Path to second data CSV file
smiles_column_1: str = None # Name of the column containing SMILES strings for the first data. By default, uses the first column.
smiles_column_2: str = None # Name of the column containing SMILES strings for the second data. By default, uses the first column.
similarity_measure: Literal['scaffold', 'morgan'] # Similarity measure to use to compare the two datasets
radius: int = 3 # Radius of Morgan fingerprint
sample_rate: float = 1.0 # Rate at which to sample pairs of molecules for Morgan similarity (to reduce time)
def scaffold_similarity(smiles_1: List[str], smiles_2: List[str]):
"""
Determines the similarity between the scaffolds of two lists of smiles strings.
:param smiles_1: A list of smiles strings.
:param smiles_2: A list of smiles strings.
"""
# Get scaffolds
scaffold_to_smiles_1 = scaffold_to_smiles(smiles_1)
scaffold_to_smiles_2 = scaffold_to_smiles(smiles_2)
scaffolds_1, smiles_sets_1 = zip(*scaffold_to_smiles_1.items())
scaffolds_2, smiles_sets_2 = zip(*scaffold_to_smiles_2.items())
smiles_to_scaffold = {smiles: scaffold for scaffold, smiles_set in scaffold_to_smiles_1.items() for smiles in smiles_set}
smiles_to_scaffold.update({smiles: scaffold for scaffold, smiles_set in scaffold_to_smiles_2.items() for smiles in smiles_set})
# Determine similarity
scaffolds_1, scaffolds_2 = set(scaffolds_1), set(scaffolds_2)
smiles_1, smiles_2 = set(smiles_1), set(smiles_2)
all_scaffolds = scaffolds_1 | scaffolds_2
all_smiles = smiles_1 | smiles_2
scaffolds_intersection = scaffolds_1 & scaffolds_2
# smiles_intersection is smiles with a scaffold that appears in both datasets
smiles_intersection = {smiles for smiles in all_smiles if smiles_to_scaffold[smiles] in scaffolds_intersection}
smiles_in_1_with_scaffold_in_2 = {smiles for smiles in smiles_1 if smiles_to_scaffold[smiles] in scaffolds_2}
smiles_in_2_with_scaffold_in_1 = {smiles for smiles in smiles_2 if smiles_to_scaffold[smiles] in scaffolds_1}
sizes_1 = np.array([len(smiles_set) for smiles_set in smiles_sets_1])
sizes_2 = np.array([len(smiles_set) for smiles_set in smiles_sets_2])
# Print results
print()
print(f'Number of molecules = {len(all_smiles):,}')
print(f'Number of scaffolds = {len(all_scaffolds):,}')
print()
print(f'Number of scaffolds in both datasets = {len(scaffolds_intersection):,}')
print(f'Scaffold intersection over union = {len(scaffolds_intersection) / len(all_scaffolds):.4f}')
print()
print(f'Number of molecules with scaffold in both datasets = {len(smiles_intersection):,}')
print(f'Molecule intersection over union = {len(smiles_intersection) / len(all_smiles):.4f}')
print()
print(f'Number of molecules in dataset 1 = {np.sum(sizes_1):,}')
print(f'Number of scaffolds in dataset 1 = {len(scaffolds_1):,}')
print()
print(f'Number of molecules in dataset 2 = {np.sum(sizes_2):,}')
print(f'Number of scaffolds in dataset 2 = {len(scaffolds_2):,}')
print()
print(f'Percent of scaffolds in dataset 1 which are also in dataset 2 = {100 * len(scaffolds_intersection) / len(scaffolds_1):.2f}%')
print(f'Percent of scaffolds in dataset 2 which are also in dataset 1 = {100 * len(scaffolds_intersection) / len(scaffolds_2):.2f}%')
print()
print(f'Number of molecules in dataset 1 with scaffolds in dataset 2 = {len(smiles_in_1_with_scaffold_in_2):,}')
print(f'Percent of molecules in dataset 1 with scaffolds in dataset 2 = {100 * len(smiles_in_1_with_scaffold_in_2) / len(smiles_1):.2f}%')
print()
print(f'Number of molecules in dataset 2 with scaffolds in dataset 1 = {len(smiles_in_2_with_scaffold_in_1):,}')
print(f'Percent of molecules in dataset 2 with scaffolds in dataset 1 = {100 * len(smiles_in_2_with_scaffold_in_1) / len(smiles_2):.2f}%')
print()
print(f'Average number of molecules per scaffold in dataset 1 = {np.mean(sizes_1):.4f} +/- {np.std(sizes_1):.4f}')
print('Percentiles for molecules per scaffold in dataset 1')
print(' | '.join([f'{i}% = {int(np.percentile(sizes_1, i)):,}' for i in range(0, 101, 10)]))
print()
print(f'Average number of molecules per scaffold in dataset 2 = {np.mean(sizes_2):.4f} +/- {np.std(sizes_2):.4f}')
print('Percentiles for molecules per scaffold in dataset 2')
print(' | '.join([f'{i}% = {int(np.percentile(sizes_2, i)):,}' for i in range(0, 101, 10)]))
def morgan_similarity(smiles_1: List[str], smiles_2: List[str], radius: int, sample_rate: float):
"""
Determines the similarity between the morgan fingerprints of two lists of smiles strings.
:param smiles_1: A list of smiles strings.
:param smiles_2: A list of smiles strings.
:param radius: The radius of the morgan fingerprints.
:param sample_rate: Rate at which to sample pairs of molecules for Morgan similarity (to reduce time).
"""
# Compute similarities
similarities = []
num_pairs = len(smiles_1) * len(smiles_2)
# Sample to improve speed
if sample_rate < 1.0:
sample_num_pairs = sample_rate * num_pairs
sample_size = math.ceil(math.sqrt(sample_num_pairs))
sample_smiles_1 = np.random.choice(smiles_1, size=sample_size, replace=True)
sample_smiles_2 = np.random.choice(smiles_2, size=sample_size, replace=True)
else:
sample_smiles_1, sample_smiles_2 = smiles_1, smiles_2
sample_num_pairs = len(sample_smiles_1) * len(sample_smiles_2)
for smile_1, smile_2 in tqdm(product(sample_smiles_1, sample_smiles_2), total=sample_num_pairs):
mol_1, mol_2 = Chem.MolFromSmiles(smile_1), Chem.MolFromSmiles(smile_2)
fp_1, fp_2 = AllChem.GetMorganFingerprint(mol_1, radius), AllChem.GetMorganFingerprint(mol_2, radius)
similarity = DataStructs.TanimotoSimilarity(fp_1, fp_2)
similarities.append(similarity)
similarities = np.array(similarities)
# Print results
print()
    print(f'Average Tanimoto similarity = {np.mean(similarities):.4f} +/- {np.std(similarities):.4f}')
    print(f'Minimum Tanimoto similarity = {np.min(similarities):.4f}')
    print(f'Maximum Tanimoto similarity = {np.max(similarities):.4f}')
    print()
    print('Percentiles for Tanimoto similarity')
print(' | '.join([f'{i}% = {np.percentile(similarities, i):.4f}' for i in range(0, 101, 10)]))
if __name__ == '__main__':
args = Args().parse_args()
data_1 = get_data(path=args.data_path_1, smiles_column=args.smiles_column_1)
data_2 = get_data(path=args.data_path_2, smiles_column=args.smiles_column_2)
if args.similarity_measure == 'scaffold':
scaffold_similarity(data_1.smiles(), data_2.smiles())
elif args.similarity_measure == 'morgan':
morgan_similarity(data_1.smiles(), data_2.smiles(), args.radius, args.sample_rate)
else:
raise ValueError(f'Similarity measure "{args.similarity_measure}" not supported.')
|
the-stack_0_15156 | """
Windowing processes for windowing over days
"""
import udatetime
import datetime
from numpy import argmin, abs, array
from sit2standpy.v2.base import _BaseProcess, PROC, DATA
__all__ = ['WindowDays']
class WindowDays(_BaseProcess):
def __init__(self, hours=[8, 20], **kwargs):
"""
Window data into days, with the default behaviour to take the hours of most likely wakefulness
Parameters
----------
hours : list-like of int
Hours to include in the windowed data. Default is 8 to 20, which excludes the night from
the detection of sit-to-stand transfers.
"""
super().__init__(**kwargs)
self._hours = hours
def _call(self):
utime = self.data['Sensors']['Lumbar']['Unix Time']
# get the first timepoint to know which day to start and end with
time_sdt = udatetime.utcfromtimestamp(utime[0])
time_edt = udatetime.utcfromtimestamp(utime[-1])
n_days = (time_edt.date() - time_sdt.date()).days
if time_edt.hour > self._hours[0]:
n_days += 1
# set the start and end hours for the first day
day_start = time_sdt.replace(hour=self._hours[0], minute=0, second=0, microsecond=0)
day_end = time_sdt.replace(hour=self._hours[1], minute=0, second=0, microsecond=0)
        iend = 10  # initialize so it can be referenced in the first (i == 0) iteration
for i in range(n_days):
istart = argmin(abs(utime[iend-10:] - day_start.timestamp())) + iend - 10
iend = argmin(abs(utime[istart:] - day_end.timestamp())) + istart + 1
self.data = (PROC.format(day_n=i+1, value='Indices'), array([istart, iend]))
day_start += datetime.timedelta(days=1)
day_end += datetime.timedelta(days=1)
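# Usage sketch (hedged -- the pipeline wiring lives in _BaseProcess, which is not
# shown in this file):
#
#     day_windows = WindowDays(hours=[8, 20])
#
# When executed by the sit2standpy v2 pipeline, _call() reads
# data['Sensors']['Lumbar']['Unix Time'] and, for each day i, stores an
# [istart, iend] index pair under PROC.format(day_n=i + 1, value='Indices').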
|
the-stack_0_15157 | import logging
from PyQt5 import QtCore, QtGui, QtWidgets
from hackedit.app import settings
from hackedit.app.forms import dlg_preferences_ui
from hackedit.app.widgets import preference_pages
from hackedit.api import system
def _logger():
return logging.getLogger(__name__)
class DlgPreferences(QtWidgets.QDialog):
_dlg = None
closed = QtCore.pyqtSignal()
color_highlight_background = None
color_highlight_text = None
def __init__(self, parent, app):
super().__init__(parent)
if DlgPreferences.color_highlight_background is None:
DlgPreferences.color_highlight_background = \
self.palette().color(QtGui.QPalette.Highlight).name()
if DlgPreferences.color_highlight_text is None:
DlgPreferences.color_highlight_text = self.palette().color(
QtGui.QPalette.HighlightedText).name()
self.app = app
self._ui = dlg_preferences_ui.Ui_Dialog()
self._ui.setupUi(self)
self._connect_slots()
# force reload of settings
settings.load()
self._setup_builtin_pages()
self._setup_editor_pages()
self._setup_plugin_pages()
self._ui.categories.sortByColumn(0, QtCore.Qt.AscendingOrder)
self._ui.categories.expandAll()
self.restore_state()
btns = self._ui.buttons
btns.button(btns.Reset).setToolTip(
_('Reset changes made to the current page.'))
btns.button(btns.RestoreDefaults).setToolTip(
_('Restore factory defaults for the current page.'))
btns.button(btns.Apply).setToolTip(
_('Apply changes but keep dialog open.'))
btns.button(btns.Ok).setToolTip(
_('Apply changes and close dialog.'))
btns.button(btns.Cancel).setToolTip(
_('Close dialog and cancel any changes.'))
self._ui.pages.setContentsMargins(0, 0, 0, 0)
def closeEvent(self, event):
super().closeEvent(event)
self.closed.emit()
@staticmethod
def edit_preferences(parent, app):
QtWidgets.qApp.setOverrideCursor(QtCore.Qt.WaitCursor)
dlg = DlgPreferences(parent, app)
dlg.restore_state()
QtWidgets.qApp.restoreOverrideCursor()
if system.DARWIN:
dlg.showMaximized()
else:
dlg.show()
dlg.exec_()
def goto_page(self, page_name):
def get_page():
for i in range(self._ui.categories.topLevelItemCount()):
item = self._ui.categories.topLevelItem(i)
if item.text(0) == page_name:
return item
for j in range(item.childCount()):
child_item = item.child(j)
if child_item.text(0) == page_name:
return child_item
return None
item = get_page()
self._ui.categories.setCurrentItem(item)
def _find_item_by_index(self, index):
for i in range(self._ui.categories.topLevelItemCount()):
item = self._ui.categories.topLevelItem(i)
idx = item.data(0, QtCore.Qt.UserRole)
if idx == index:
return item
assert isinstance(item, QtWidgets.QTreeWidgetItem)
for j in range(item.childCount()):
child_item = item.child(j)
idx = child_item.data(0, QtCore.Qt.UserRole)
if idx == index:
return child_item
return None
def _on_item_activated(self, item):
index = item.data(0, QtCore.Qt.UserRole)
text = item.text(0)
if item.parent() is not None:
text = '%s - %s' % (item.parent().text(0), text)
self._ui.label_title.setText(text)
self._ui.label_title.setStyleSheet('''background-color: %s;
color: %s;
padding: 10px;
border-radius:3px;''' % (DlgPreferences.color_highlight_background,
DlgPreferences.color_highlight_text))
self._ui.pages.setCurrentIndex(index)
w = self._ui.pages.currentWidget()
buttons = self._ui.buttons
buttons.button(buttons.Reset).setVisible(w.can_reset)
buttons.button(buttons.RestoreDefaults).setVisible(
w.can_restore_defaults)
buttons.button(buttons.Apply).setVisible(w.can_apply)
def _reset(self):
self._ui.pages.currentWidget().reset()
def _restore_defaults(self):
QtWidgets.qApp.setOverrideCursor(QtCore.Qt.WaitCursor)
self._ui.pages.currentWidget().restore_defaults()
self._reset()
QtWidgets.qApp.restoreOverrideCursor()
for i in range(self._ui.pages.count()):
page = self._ui.pages.widget(i)
page.save()
def _apply(self):
# save all settings
QtWidgets.qApp.setOverrideCursor(QtCore.Qt.WaitCursor)
try:
for i in range(self._ui.pages.count()):
page = self._ui.pages.widget(i)
page.save()
self.app.apply_preferences()
for i in range(self._ui.pages.count()):
page = self._ui.pages.widget(i)
page.reset()
finally:
QtWidgets.qApp.restoreOverrideCursor()
def restore_state(self):
index = int(QtCore.QSettings().value(
'_cache/preferences_page_index', 0))
item = self._find_item_by_index(index)
self._ui.categories.setCurrentItem(item)
def _setup_builtin_pages(self):
env = preference_pages.Environment()
self._add_page(env)
self._add_page(preference_pages.Editor())
self._add_page(preference_pages.Behaviour())
colors = preference_pages.EditorColors()
env.colors = colors
self._add_page(colors)
self._add_page(preference_pages.EditorDisplay())
self._add_page(preference_pages.Indexing())
self._add_page(preference_pages.Mimetypes())
self._add_page(preference_pages.Notifications())
self._add_page(preference_pages.Shortcuts())
self._add_page(preference_pages.Templates())
self._add_page(preference_pages.Workspaces())
def _setup_plugin_pages(self):
pages = []
for p in self.app.plugin_manager.preferences_page_plugins:
pages.append(p.get_preferences_page())
for p in sorted(pages, key=lambda x: x.category is not None):
self._add_page(p)
pages[:] = []
for p in self.app.plugin_manager.workspace_plugins.values():
page = p.get_preferences_page()
if page:
pages.append(page)
for p in sorted(pages, key=lambda x: x.category is not None):
self._add_page(p)
def _setup_editor_pages(self):
for p in self.app.plugin_manager.editor_plugins:
p = p.get_specific_preferences_page()
if p:
p.category = _('Editor')
self._add_page(p)
def _connect_slots(self):
self._ui.categories.currentItemChanged.connect(
self._on_item_activated)
self._ui.buttons.button(self._ui.buttons.Reset).clicked.connect(
self._reset)
bt_restore_defaults = self._ui.buttons.button(
self._ui.buttons.RestoreDefaults)
bt_restore_defaults.clicked.connect(self._restore_defaults)
self._ui.buttons.button(self._ui.buttons.Apply).clicked.connect(
self._apply)
def accept(self):
self._apply()
QtCore.QSettings().setValue(
'_cache/preferences_page_index', self._ui.pages.currentIndex())
super().accept()
def reject(self):
QtCore.QSettings().setValue(
'_cache/preferences_page_index', self._ui.pages.currentIndex())
super().reject()
def _add_page(self, widget):
"""
Adds a settings page to the dialog
:param widget: page widget
:type widget: hackedit.api.widgets.PreferencePage
"""
if widget is None:
return
widget.setContentsMargins(0, 0, 0, 0)
index = self._ui.pages.count()
self._ui.pages.addWidget(widget)
item = QtWidgets.QTreeWidgetItem()
item.setText(0, widget.name)
if widget.icon is not None:
item.setIcon(0, widget.icon)
item.setData(0, QtCore.Qt.UserRole, index)
parent = None
if widget.category:
parent = self._ui.categories.findItems(
widget.category, QtCore.Qt.MatchExactly, 0)
if parent:
parent = parent[0]
else:
print('parent not found', widget.category)
if parent:
parent.addChild(item)
else:
self._ui.categories.addTopLevelItem(item)
widget.app = self.app
widget.reset()
def keyPressEvent(self, ev):
if ev.key() == QtCore.Qt.Key_Enter or \
ev.key() == QtCore.Qt.Key_Return:
return
super().keyPressEvent(ev)
|
the-stack_0_15158 | import urllib.request as urlr
import urllib.parse as urlp
import urllib.error as urle
import http.cookiejar as ck
import pickle as pk
import re
import zlib
import time
import random as r
from datetime import datetime, timedelta
import os
from lxml import etree
import tkinter.messagebox
from tkinter import *
def get_Pkls(path):
if os.path.exists(path+'userdata.pkl'):
with open(path+'userdata.pkl', 'rb') as f1:
user_data = pk.load(f1)
else:
user_data = dict()
with open(path+'headers.pkl', 'rb') as f2:
head = pk.load(f2)
with open(path+'urls.pkl', 'rb') as f3:
url = pk.load(f3)
return (user_data, head, url)
def gzip_decode(rsp,code='utf-8'):
if rsp != None:
content = rsp.read()
gzipped = rsp.headers.get('Content-Encoding')
if gzipped:
html = zlib.decompress(content, zlib.MAX_WBITS | 32).decode(code,errors='ignore')
else:
html = content.decode(code)
return html
return ''
def get_FormHash(html):
tgt = re.search(r'name="formhash" value="(.+?)"',html)
if tgt:
return tgt.group(1)
return ''
def get_loginhash(html):
tgt = re.search(r'<div id="main_messaqge_(.+?)">',html)
if tgt:
return tgt.group(1)
return ''
def set_pgvs():
curMs = datetime.utcnow().second
pgv_ssid = "s" + str( (round(r.random() * 2147483647) * curMs) % 10000000000 )
pgv_pvi = (round(r.random() * 2147483647) * curMs) % 10000000000
return (pgv_ssid, pgv_pvi)
class WebService:
def __init__(self, path):
self.cookie_name = 'cookie.txt'
self.code = 'utf-8'
self.path = path
self.userdata, self.headers, self.urls = get_Pkls(self.path)
self.init_userdata()
self.init_cookie()
self.new_opener()
self.error = False
self.get_saylist()
def init_userdata(self):
if self.userdata.get('mission') == None:
self.userdata['mission'] = True
if self.userdata.get('autologin') == None:
self.userdata['autologin'] = True
def init_cookie(self):
self.cookie = ck.LWPCookieJar(self.path + self.cookie_name)
try:
self.cookie.load(self.path + self.cookie_name, True, True)
except FileNotFoundError:
self.cookie.save(self.path + self.cookie_name, True, True)
def save_cookie(self):
self.cookie.save(self.path + self.cookie_name, True, True)
def new_opener(self):
self.opener = urlr.build_opener(urlr.HTTPCookieProcessor(self.cookie))
def get_prelogin_data(self):
self.data = {}
self.data['username'] = self.userdata['username']
self.data['password'] = self.userdata['password']
self.data['loginfield'] = 'username'
self.data['cookietime'] = '2592000'
self.data['quickforward'] = 'yes'
self.data['handlekey'] = 'ls'
self.post_data = urlp.urlencode(self.data).encode(self.code)
def get_response(self, url, data=None, headers=None):
temp_headers = self.headers if headers==None else headers
if data:
req = urlr.Request(url, data, temp_headers)
else:
req = urlr.Request(url, headers=temp_headers)
try:
response = self.opener.open(req)
except urle.URLError as e:
if hasattr(e,'reason'):
print('We failed to reach a server.')
print('Reason: ', e.reason)
prompt = 'We failed to reach a server.\nReason: ' + str(e.reason)
elif hasattr(e, 'code'):
                print('The server couldn\'t fulfill the request.')
                print('Error code: ', e.code)
                prompt = 'The server couldn\'t fulfill the request.\nError code: ' + str(e.code)
tkinter.messagebox.showerror('出错啦!', prompt)
response = None
except ConnectionResetError as e:
print(e)
tkinter.messagebox.showerror('出错啦!', e)
response = None
finally:
return response
def get_Verify_img(self):
os.chdir(self.path)
response = self.get_response(self.urls['code_url'])
if response:
img = response.read()
with open('code.png', 'wb') as f:
f.write(img)
os.chdir(os.pardir)
def get_login_data(self, code):
self.data.pop('quickforward')
self.data.pop('handlekey')
self.data['formhash'] = get_FormHash(self.html)
        self.data['tsdm_verify'] = code  # verification code read from the Entry widget
self.data['answer'] = ''
self.data['questionid'] = '0'
self.data['referer'] = 'https://www.tsdm39.net/forum.php'
self.post_data = urlp.urlencode(self.data).encode(self.code)
def add_cookie(self, name, value):
temp_cookie = ck.Cookie(
version=0,
name=name,
value=value,
port=None,
port_specified=False,
domain=".tsdm39.net",
domain_specified=True,
domain_initial_dot=False,
path="/",
path_specified=True,
secure=False,
expires=None,
discard=False,
comment=None,
comment_url=None,
rest={}
)
self.cookie.set_cookie(temp_cookie)
def get_pgv_cookie(self):
os.chdir(self.path)
if os.path.exists('pgv.txt'):
with open('pgv.txt') as f:
pgv_str = f.read()
tgt = re.search(r'pgv_pvi=(.+); pgv_info=(.+)', pgv_str)
self.add_cookie('pgv_pvi', tgt.group(1))
self.add_cookie('pgv_info', tgt.group(2))
si = re.search(r'ssi=(.+)', tgt.group(2)).group(1)
r3 = int(r.random() * 450) + 350
url = 'http://pingtcss.qq.com/pingd?dm=www.tsdm39.net&url=/forum.php&arg=-&rdm=-&rurl=-&adt=-&rarg=-&pvi=' + tgt.group(1) +'&si=' + si + '&ui=0&ty=1&rt=forum&pn=1&qq=000&r2=8480046&scr=1366x768&scl=24-bit&lg=zh-cn&jv=0&pf=Win32&tz=-8&fl=26.0%20r0&ct=-&ext=bc=0;adid=&r3=' + str(r3)
self.get_response(url)
else:
pgv_ssid, pgv_pvi = set_pgvs()
r3 = int(r.random() * 450) + 350
url = 'http://pingtcss.qq.com/pingd?dm=www.tsdm39.net&url=/forum.php&arg=-&rdm=-&rurl=-&adt=-&rarg=-&pvi=' + str(pgv_pvi) +'&si=' + str(pgv_ssid) + '&ui=0&ty=1&rt=forum&pn=1&qq=000&r2=8480046&scr=1366x768&scl=24-bit&lg=zh-cn&jv=0&pf=Win32&tz=-8&fl=26.0%20r0&ct=-&ext=bc=0;adid=&r3=' + str(r3)
self.get_response(url)
pgv_str = 'pgv_pvi=' + str(pgv_pvi) +'; pgv_info=ssi=' + str(pgv_ssid)
with open('pgv.txt', 'wt') as f:
f.write(pgv_str)
self.add_cookie('pgv_pvi',str(pgv_pvi))
self.add_cookie('pgv_info','ssi=' + str(pgv_ssid))
os.chdir(os.pardir)
def autoLogin(self):
response = self.get_response(self.urls['first_url'])
self.html = gzip_decode(response)
return re.search(r'title="访问我的空间">(.+)</a>',self.html)
def is_login(self, html):
return re.search(self.userdata['username'], html)
def get_enter_url(self, account, passwd):
self.userdata['username'] = account
self.userdata['password'] = passwd
self.get_prelogin_data()
response = self.get_response(self.urls['login_url'], self.post_data)
self.save_cookie()
self.html = gzip_decode(response)
self.urls['enter_url'] = self.urls['enter_url_start'] + get_loginhash(self.html) + self.urls['enter_url_end']
self.get_Verify_img()
def save_userdata(self):
with open(self.path + 'userdata.pkl', 'wb') as f:
pk.dump(self.userdata, f)
def get_author_image(self):
html = etree.HTML(self.html)
src = html.xpath('//div[@id="um"]/div[@class="avt y"]/a/img/@data-original')[0]
rsp = self.get_response(src)
if rsp:
img = rsp.read()
with open(self.path+'author.jpg','wb') as f:
f.write(img)
def get_saylist(self):
if os.path.exists(self.path+'saylist.txt'):
self.saylist = []
with open(self.path+'saylist.txt') as f:
for each in f:
each = each.strip()
if each:
self.saylist.append(each)
else:
prompt = '天使奏赛高!!!\n真白赛高~~~\n日常签到。。。\n'
self.saylist = ['天使奏赛高!!!','真白赛高~~~','日常签到。。。']
with open(self.path+'saylist.txt' ,'wt') as f:
f.write(prompt)
def get_sign_data(self):
rsp = self.get_response(self.urls['to_sign_url'])
self.html = gzip_decode(rsp)
sign_data = {}
sign_data['todaysay'] = r.choice(self.saylist)
sign_data['qdxq'] = 'kx'
sign_data['qdmode'] = '1'
sign_data['formhash'] = get_FormHash(self.html)
sign_data['fastreply'] = '1'
self.post_data = urlp.urlencode(sign_data).encode(self.code)
def do_sign(self):
rsp = self.get_response(self.urls['sign_url'], self.post_data)
self.save_cookie()
self.html = gzip_decode(rsp)
rand_money = re.findall(r'恭喜你签到成功!(.+?)</div>', self.html)
signed = re.search(r'您今日已经签到', self.html)
if rand_money:
return ('签到成功!%s' % rand_money[0])
elif signed:
return '您今日已经签到,请明天再来!'
else:
return None
def pre_mission(self):
rsp = self.get_response(self.urls['to_mission_url'])
self.html = gzip_decode(rsp)
return self.is_login(self.html)
def do_mission(self):
mission_data = {'act': 'clickad'}
self.post_data = urlp.urlencode(mission_data).encode(self.code)
rsp = self.get_response(self.urls['mission_url'], self.post_data)
self.html = gzip_decode(rsp)
wait = re.search(r'您需要等待(.+)后即可进行。',self.html)
time.sleep(r.randint(2,5))
if wait:
return wait.group(1)
else:
for i in range(5):
rsp = self.get_response(self.urls['mission_url'], self.post_data)
time.sleep(r.randint(2,5))
mission_data['act'] = 'getcre'
self.post_data = urlp.urlencode(mission_data).encode(self.code)
rsp = self.get_response(self.urls['mission_url'],self.post_data)
self.save_cookie()
self.html = gzip_decode(rsp)
self.mission_money = re.search(r'恭喜,您已经(成功领取了奖励天使币.+)<br />(每间隔.+可进行一次)。',self.html)
fail = re.search(r'不要作弊哦,重新进行游戏吧!',self.html)
if fail:
return 'fail'
return None
class Logs:
def __init__(self, path=os.curdir, filename='logs.pkl'):
self.logname = filename
self.path = path
self.init_logs()
def init_logs(self):
if os.path.exists(self.path + self.logname):
with open(self.path + self.logname, 'rb') as f:
self.logs = pk.load(f)
else:
self.logs = dict()
def save_logs(self):
with open(self.path + self.logname, 'wb') as f:
pk.dump(self.logs, f)
def log2file(self, content):
prompt = self.date2str(self.now())+content+'\n'
if os.path.exists(self.path+'logs.txt'):
with open(self.path+'logs.txt', 'at') as f:
f.write(prompt)
else:
with open(self.path+'logs.txt', 'wt') as f:
f.write(prompt)
def datelist2str(self, datelist):
return (str(datelist[0])+'年'+str(datelist[1])+'月'+str(datelist[2])+'日<-'+str(datelist[3])+':'+str(datelist[4])+':'+str(datelist[5])+'->:')
def date2str(self, date):
return (str(date.year)+'年'+str(date.month)+'月'+str(date.day)+'日<-'+str(date.hour)+':'+str(date.minute)+':'+str(date.second)+'->:')
def update_log(self, name, time=datetime.now(), save=True):
self.logs[name] = self.date2list(time)
if save:
self.save_logs()
def now(self):
return datetime.now()
def dt_list2sec(self, datelist2, datelist1):
dt = self.list2date(datelist2) - self.list2date(datelist1)
return dt.seconds
def date2list(self, date):
datelist = []
datelist.append(date.year)
datelist.append(date.month)
datelist.append(date.day)
datelist.append(date.hour)
datelist.append(date.minute)
datelist.append(date.second)
return datelist
def list2date(self, datelist):
return datetime(datelist[0],datelist[1],datelist[2],datelist[3],datelist[4],datelist[5])
def sign_avaliable(self):
if self.logs.get('sign'):
dt = self.now() - self.list2date(self.logs['sign'])
if (self.now().day - self.logs['sign'][2] >= 1) or (dt.seconds > 24*60*60):
return True
else:
return False
return True
def mission_avaliable(self):
if self.logs.get('mission'):
delta = self.now() - self.list2date(self.logs['mission'])
dt = 6*60*60 - delta.seconds
if dt > 0:
return dt
return True
def get_missionedtime(self, dtlist):
dt = timedelta(hours=6) - timedelta(hours=dtlist[0], minutes=dtlist[1], seconds=dtlist[2])
self.update_log('mission', self.now() - dt)
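# A minimal usage sketch (illustrative; 'data/' stands for whatever resource
# directory holds userdata.pkl, headers.pkl, urls.pkl and the cookie file):
#
#     ws = WebService('data/')
#     if not ws.autoLogin():
#         ws.get_enter_url(account, passwd)  # writes code.png; finish login in the GUI
#     ws.get_sign_data()
#     print(ws.do_sign())
#
#     logs = Logs('data/')
#     if logs.sign_avaliable():
#         logs.update_log('sign')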
|
the-stack_0_15160 | """Provides macros for working with yoga library."""
YOGA_ROOTS = ["//..."]
JAVA_TARGET = "//java:java"
INFER_ANNOTATIONS_TARGET = "//lib/infer-annotations:infer-annotations"
JSR_305_TARGET = "//lib/jsr-305:jsr-305"
JUNIT_TARGET = "//lib/junit:junit"
PROGRUARD_ANNOTATIONS_TARGET = "//java/proguard-annotations/src/main/java/com/facebook/proguard/annotations:annotations"
SOLOADER_TARGET = "//lib/soloader:soloader"
GTEST_TARGET = "//lib/gtest:gtest"
JNI_TARGET = "//lib/jni:jni"
FBJNI_TARGET = "//lib/fb:fbjni"
FBJNI_JAVA_TARGET = "//lib/fb/src/main/java/com/facebook/jni:jni"
APPCOMPAT_TARGET = "//lib/appcompat:appcompat"
APPLE = ""
ANDROID = ""
ANDROID_SUPPORT_TARGET = "//lib/android-support:android-support"
ANDROID_TARGET = "//android:android"
ANDROID_JAVA_TARGET = "//android/src/main/java/com/facebook/yoga/android:android"
ANDROID_RES_TARGET = "//android:res"
ANDROID_SAMPLE_JAVA_TARGET = "//android/sample/java/com/facebook/samples/yoga:yoga"
ANDROID_SAMPLE_RES_TARGET = "//android/sample:res"
CXX_LIBRARY_WHITELIST = [
"//:yoga",
"//lib/fb:fbjni",
"//java:jni",
]
BASE_COMPILER_FLAGS = [
"-fno-omit-frame-pointer",
"-fexceptions",
"-Wall",
"-Werror",
"-O3",
"-ffast-math",
]
LIBRARY_COMPILER_FLAGS = BASE_COMPILER_FLAGS + [
"-fPIC",
]
def _paths_join(path, *others):
"""Joins one or more path components."""
result = path
for p in others:
if p.startswith("/"): # absolute
result = p
elif not result or result.endswith("/"):
result += p
else:
result += "/" + p
return result
def subdir_glob(glob_specs, exclude = None, prefix = ""):
"""Returns a dict of sub-directory relative paths to full paths.
The subdir_glob() function is useful for defining header maps for C/C++
    libraries which should be relative to the given sub-directory.
    Given a list of tuples in the form of (relative-sub-directory, glob-pattern),
it returns a dict of sub-directory relative paths to full paths.
Please refer to native.glob() for explanations and examples of the pattern.
Args:
glob_specs: The array of tuples in form of
(relative-sub-directory, glob-pattern inside relative-sub-directory).
type: List[Tuple[str, str]]
exclude: A list of patterns to identify files that should be removed
from the set specified by the first argument. Defaults to [].
type: Optional[List[str]]
prefix: If is not None, prepends it to each key in the dictionary.
Defaults to None.
type: Optional[str]
Returns:
A dict of sub-directory relative paths to full paths.
"""
if exclude == None:
exclude = []
results = []
for dirpath, glob_pattern in glob_specs:
results.append(
_single_subdir_glob(dirpath, glob_pattern, exclude, prefix),
)
return _merge_maps(*results)
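# Illustrative usage (the directory layout is hypothetical, not part of this repo):
#
#     exported_headers = subdir_glob(
#         [("include/yoga", "**/*.h")],
#         exclude = ["include/yoga/private/**/*.h"],
#         prefix = "yoga",
#     )
#
# would map e.g. "yoga/Yoga.h" -> "include/yoga/Yoga.h".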
def _merge_maps(*file_maps):
result = {}
for file_map in file_maps:
for key in file_map:
if key in result and result[key] != file_map[key]:
fail(
"Conflicting files in file search paths. " +
"\"%s\" maps to both \"%s\" and \"%s\"." %
(key, result[key], file_map[key]),
)
result[key] = file_map[key]
return result
def _single_subdir_glob(dirpath, glob_pattern, exclude = None, prefix = None):
if exclude == None:
exclude = []
results = {}
files = native.glob([_paths_join(dirpath, glob_pattern)], exclude = exclude)
for f in files:
if dirpath:
key = f[len(dirpath) + 1:]
else:
key = f
if prefix:
key = _paths_join(prefix, key)
results[key] = f
return results
def yoga_dep(dep):
return "//" + dep
def yoga_android_aar(*args, **kwargs):
native.android_aar(*args, **kwargs)
def yoga_android_binary(*args, **kwargs):
native.android_binary(*args, **kwargs)
def yoga_android_library(*args, **kwargs):
native.android_library(*args, **kwargs)
def yoga_android_resource(*args, **kwargs):
native.android_resource(*args, **kwargs)
def yoga_apple_library(*args, **kwargs):
native.apple_library(*args, **kwargs)
def yoga_apple_test(*args, **kwargs):
native.apple_test(*args, **kwargs)
def yoga_cxx_binary(*args, **kwargs):
kwargs.pop("platforms", None)
native.cxx_binary(*args, **kwargs)
def yoga_cxx_library(*args, **kwargs):
# Currently unused
kwargs.pop("platforms", None)
native.cxx_library(*args, **kwargs)
def yoga_cxx_test(*args, **kwargs):
native.cxx_test(*args, **kwargs)
def yoga_java_binary(*args, **kwargs):
native.java_binary(*args, **kwargs)
def yoga_java_library(*args, **kwargs):
native.java_library(*args, **kwargs)
def yoga_java_test(*args, **kwargs):
native.java_test(*args, **kwargs)
def yoga_prebuilt_cxx_library(*args, **kwargs):
native.prebuilt_cxx_library(*args, **kwargs)
def yoga_prebuilt_jar(*args, **kwargs):
native.prebuilt_jar(*args, **kwargs)
def is_apple_platform():
return True
def yoga_apple_binary():
if is_apple_platform():
yoganet_ios_srcs = []
for arch in [
"iphonesimulator-x86_64",
"iphoneos-arm64",
]:
name = "yoganet-" + arch
yoganet_ios_srcs.append(":" + name)
native.genrule(
name = name,
srcs = [
yoga_dep(":yogaApple#%s,static" % arch),
yoga_dep("YogaKit:YogaKitApple#%s,static" % arch),
yoga_dep("csharp:yoganetApple#%s,static" % arch),
],
out = "libyoga-%s.a" % arch,
cmd = "libtool -static -o $OUT $SRCS",
visibility = [yoga_dep("csharp:yoganet-ios")],
)
native.genrule(
name = "yoganet-ios",
srcs = yoganet_ios_srcs,
out = "libyoga.a",
cmd = "lipo $SRCS -create -output $OUT",
visibility = ["PUBLIC"],
)
yoganet_macosx_target = "csharp:yoganetAppleMac#macosx-%s,dynamic"
native.genrule(
name = "yoganet-macosx",
srcs = [
yoga_dep(yoganet_macosx_target % "x86_64"),
],
out = "libyoga.dylib",
cmd = "lipo $SRCS -create -output $OUT",
visibility = ["PUBLIC"],
)
|
the-stack_0_15161 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botbuilder.core import CardFactory, MessageFactory
from botbuilder.dialogs import (
ComponentDialog,
DialogSet,
DialogTurnStatus,
WaterfallDialog,
WaterfallStepContext,
)
from botbuilder.dialogs.prompts import TextPrompt, PromptOptions
from botbuilder.schema import (
ActionTypes,
Attachment,
AnimationCard,
AudioCard,
HeroCard,
VideoCard,
ReceiptCard,
SigninCard,
ThumbnailCard,
MediaUrl,
CardAction,
CardImage,
ThumbnailUrl,
Fact,
ReceiptItem,
)
from .resources.adaptive_card_example import ADAPTIVE_CARD_CONTENT
from helpers.activity_helper import create_activity_reply
MAIN_WATERFALL_DIALOG = "mainWaterfallDialog"
class MainDialog(ComponentDialog):
def __init__(self):
super().__init__("MainDialog")
# Define the main dialog and its related components.
self.add_dialog(TextPrompt("TextPrompt"))
self.add_dialog(
WaterfallDialog(
MAIN_WATERFALL_DIALOG, [self.choice_card_step, self.show_card_step]
)
)
# The initial child Dialog to run.
self.initial_dialog_id = MAIN_WATERFALL_DIALOG
"""
1. Prompts the user if the user is not in the middle of a dialog.
2. Re-prompts the user when an invalid input is received.
"""
async def choice_card_step(self, step_context: WaterfallStepContext):
menu_text = (
"Which card would you like to see?\n"
"(1) Adaptive Card\n"
"(2) Animation Card\n"
"(3) Audio Card\n"
"(4) Hero Card\n"
"(5) Receipt Card\n"
"(6) Signin Card\n"
"(7) Thumbnail Card\n"
"(8) Video Card\n"
"(9) All Cards"
)
# Prompt the user with the configured PromptOptions.
return await step_context.prompt(
"TextPrompt", PromptOptions(prompt=MessageFactory.text(menu_text))
)
"""
Send a Rich Card response to the user based on their choice.
    This method is only called when a valid prompt response is parsed from the user's response to the ChoicePrompt.
"""
async def show_card_step(self, step_context: WaterfallStepContext):
response = step_context.result.lower().strip()
choice_dict = {
"1": [self.create_adaptive_card],
"adaptive card": [self.create_adaptive_card],
"2": [self.create_animation_card],
"animation card": [self.create_animation_card],
"3": [self.create_audio_card],
"audio card": [self.create_audio_card],
"4": [self.create_hero_card],
"hero card": [self.create_hero_card],
"5": [self.create_receipt_card],
"receipt card": [self.create_receipt_card],
"6": [self.create_signin_card],
"signin card": [self.create_signin_card],
"7": [self.create_thumbnail_card],
"thumbnail card": [self.create_thumbnail_card],
"8": [self.create_video_card],
"video card": [self.create_video_card],
"9": [
self.create_adaptive_card,
self.create_animation_card,
self.create_audio_card,
self.create_hero_card,
self.create_receipt_card,
self.create_signin_card,
self.create_thumbnail_card,
self.create_video_card,
],
"all cards": [
self.create_adaptive_card,
self.create_animation_card,
self.create_audio_card,
self.create_hero_card,
self.create_receipt_card,
self.create_signin_card,
self.create_thumbnail_card,
self.create_video_card,
],
}
# Get the functions that will generate the card(s) for our response
# If the stripped response from the user is not found in our choice_dict, default to None
choice = choice_dict.get(response, None)
# If the user's choice was not found, respond saying the bot didn't understand the user's response.
if not choice:
not_found = create_activity_reply(
step_context.context.activity, "Sorry, I didn't understand that. :("
)
await step_context.context.send_activity(not_found)
else:
for func in choice:
card = func()
response = create_activity_reply(
step_context.context.activity, "", "", [card]
)
await step_context.context.send_activity(response)
# Give the user instructions about what to do next
await step_context.context.send_activity("Type anything to see another card.")
return await step_context.end_dialog()
"""
======================================
Helper functions used to create cards.
======================================
"""
# Methods to generate cards
def create_adaptive_card(self) -> Attachment:
return CardFactory.adaptive_card(ADAPTIVE_CARD_CONTENT)
def create_animation_card(self) -> Attachment:
card = AnimationCard(
media=[MediaUrl(url="http://i.giphy.com/Ki55RUbOV5njy.gif")],
title="Microsoft Bot Framework",
subtitle="Animation Card",
)
return CardFactory.animation_card(card)
def create_audio_card(self) -> Attachment:
card = AudioCard(
media=[MediaUrl(url="http://www.wavlist.com/movies/004/father.wav")],
title="I am your father",
subtitle="Star Wars: Episode V - The Empire Strikes Back",
text="The Empire Strikes Back (also known as Star Wars: Episode V – The Empire Strikes "
"Back) is a 1980 American epic space opera film directed by Irvin Kershner. Leigh "
"Brackett and Lawrence Kasdan wrote the screenplay, with George Lucas writing the "
"film's story and serving as executive producer. The second installment in the "
"original Star Wars trilogy, it was produced by Gary Kurtz for Lucasfilm Ltd. and "
"stars Mark Hamill, Harrison Ford, Carrie Fisher, Billy Dee Williams, Anthony "
"Daniels, David Prowse, Kenny Baker, Peter Mayhew and Frank Oz.",
image=ThumbnailUrl(
url="https://upload.wikimedia.org/wikipedia/en/3/3c/SW_-_Empire_Strikes_Back.jpg"
),
buttons=[
CardAction(
type=ActionTypes.open_url,
title="Read more",
value="https://en.wikipedia.org/wiki/The_Empire_Strikes_Back",
)
],
)
return CardFactory.audio_card(card)
def create_hero_card(self) -> Attachment:
card = HeroCard(
title="",
images=[
CardImage(
url="https://sec.ch9.ms/ch9/7ff5/e07cfef0-aa3b-40bb-9baa-7c9ef8ff7ff5/buildreactionbotframework_960.jpg"
)
],
buttons=[
CardAction(
type=ActionTypes.open_url,
title="Get Started",
value="https://docs.microsoft.com/en-us/azure/bot-service/",
)
],
)
return CardFactory.hero_card(card)
def create_video_card(self) -> Attachment:
card = VideoCard(
title="Big Buck Bunny",
subtitle="by the Blender Institute",
text="Big Buck Bunny (code-named Peach) is a short computer-animated comedy film by the Blender "
"Institute, part of the Blender Foundation. Like the foundation's previous film Elephants "
"Dream, the film was made using Blender, a free software application for animation made by "
"the same foundation. It was released as an open-source film under Creative Commons License "
"Attribution 3.0.",
media=[
MediaUrl(
url="http://download.blender.org/peach/bigbuckbunny_movies/"
"BigBuckBunny_320x180.mp4"
)
],
buttons=[
CardAction(
type=ActionTypes.open_url,
title="Learn More",
value="https://peach.blender.org/",
)
],
)
return CardFactory.video_card(card)
def create_receipt_card(self) -> Attachment:
card = ReceiptCard(
title="John Doe",
facts=[
Fact(key="Order Number", value="1234"),
Fact(key="Payment Method", value="VISA 5555-****"),
],
items=[
ReceiptItem(
title="Data Transfer",
price="$38.45",
quantity="368",
image=CardImage(
url="https://github.com/amido/azure-vector-icons/raw/master/"
"renders/traffic-manager.png"
),
),
ReceiptItem(
title="App Service",
price="$45.00",
quantity="720",
image=CardImage(
url="https://github.com/amido/azure-vector-icons/raw/master/"
"renders/cloud-service.png"
),
),
],
tax="$7.50",
total="90.95",
buttons=[
CardAction(
type=ActionTypes.open_url,
title="More Information",
value="https://azure.microsoft.com/en-us/pricing/details/bot-service/",
)
],
)
return CardFactory.receipt_card(card)
def create_signin_card(self) -> Attachment:
card = SigninCard(
text="BotFramework Sign-in Card",
buttons=[
CardAction(
type=ActionTypes.signin,
title="Sign-in",
value="https://login.microsoftonline.com",
)
],
)
return CardFactory.signin_card(card)
def create_thumbnail_card(self) -> Attachment:
card = ThumbnailCard(
title="BotFramework Thumbnail Card",
subtitle="Your bots — wherever your users are talking",
text="Build and connect intelligent bots to interact with your users naturally wherever"
" they are, from text/sms to Skype, Slack, Office 365 mail and other popular services.",
images=[
CardImage(
url="https://sec.ch9.ms/ch9/7ff5/"
"e07cfef0-aa3b-40bb-9baa-7c9ef8ff7ff5/"
"buildreactionbotframework_960.jpg"
)
],
buttons=[
CardAction(
type=ActionTypes.open_url,
title="Get Started",
value="https://docs.microsoft.com/en-us/azure/bot-service/",
)
],
)
return CardFactory.thumbnail_card(card)
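# Illustrative wiring (not part of this sample): a host bot would typically register
# MainDialog in a DialogSet backed by conversation state and run it on every turn.
# Names such as "conversation_state" and "turn_context" are assumptions here.
#
#     dialogs = DialogSet(conversation_state.create_property("DialogState"))
#     dialogs.add(MainDialog())
#
#     async def on_turn(turn_context):
#         dialog_context = await dialogs.create_context(turn_context)
#         results = await dialog_context.continue_dialog()
#         if results.status == DialogTurnStatus.Empty:
#             await dialog_context.begin_dialog("MainDialog")
#         await conversation_state.save_changes(turn_context)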
|
the-stack_0_15162 | # ParallelContext transfer functionality tests.
# This uses nonsense models and values to verify transfer takes place correctly.
import sys
from io import StringIO
from neuron import h
#h.nrnmpi_init()
pc = h.ParallelContext()
rank = pc.id()
nhost = pc.nhost()
if nhost > 1:
if rank == 0:
print("nhost > 1 so calls to expect_error will return without testing.")
def expect_error(callable, args, sec=None):
"""
Execute callable(args) and assert that it generated an error.
If sec is not None, executes callable(args, sec=sec)
    Skipped if nhost > 1, since every hoc_execerror ends in MPI_ABORT.
Does not work well with nrniv launch since hoc_execerror messages do not
pass through sys.stderr.
"""
if nhost > 1:
return
old_stderr = sys.stderr
sys.stderr = my_stderr = StringIO()
err = 0
try:
if sec:
callable(*args, sec=sec)
else:
callable(*args)
except:
        err = 1
errmes = my_stderr.getvalue()
sys.stderr = old_stderr
if errmes:
errmes = errmes.splitlines()[0]
errmes = errmes[(errmes.find(':')+2):]
print("expect_error: %s" % errmes)
if err == 0:
print("expect_error: no err for %s%s" % (str(callable), str(args)))
assert(err)
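# Illustrative use of expect_error (mirrors the calls further below): assert that an
# invalid sid is rejected, e.g.
#   expect_error(pc.source_var, (soma(.5)._ref_v, -1), sec=soma)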
# HGap POINT_PROCESS via ChannelBuilder.
# Cannot use with extracellular.
ks = h.KSChan(1)
ks.name("HGap")
ks.iv_type(0)
ks.gmax(0)
ks.erev(0)
# Cell with enough nonsense stuff to exercise transfer possibilities.
class Cell():
def __init__(self):
self.soma = h.Section(name="soma", cell=self)
self.soma.diam = 10.
self.soma.L = 10.
self.soma.insert("na_ion") # can use nai as transfer source
# can use POINT_PROCESS range variable as targets
self.ic = h.IClamp(self.soma(.5))
self.vc = h.SEClamp(self.soma(.5))
self.vc.rs = 1e9 # no voltage clamp current
self.hgap = [None for _ in range(2)] # filled by mkgaps
def run():
pc.setup_transfer()
h.finitialize()
h.fadvance()
model = None # Global allows teardown of model
def teardown():
"""
destroy model
"""
global model
pc.gid_clear()
model = None
def mkmodel(ncell):
"""
Destroy existing model and re-create with ncell Cells.
"""
global model
if model:
teardown()
cells = {}
for gid in range(rank, ncell, nhost):
cells[gid] = Cell()
pc.set_gid2node(gid, rank)
pc.cell(gid, h.NetCon(cells[gid].soma(.5)._ref_v, None, sec=cells[gid].soma))
model = (cells, ncell)
def mkgaps(gids):
''' For list of gids, full gap, right to left '''
gidset = set()
for gid in gids:
g = [gid, (gid + 1)%model[1]]
sids = [i + 1000 for i in g]
for i, j in enumerate([1,0]):
if pc.gid_exists(g[i]):
cell = model[0][g[i]]
if g[i] not in gidset: # source var sid cannot be used twice
pc.source_var(cell.soma(.5)._ref_v, sids[i], sec=cell.soma)
gidset.add(g[i])
assert(cell.hgap[j] is None)
cell.hgap[j] = h.HGap(cell.soma(.5))
pc.target_var(cell.hgap[j], cell.hgap[j]._ref_e, sids[j])
cell.hgap[j].gmax = 0.0001
def transfer1(amp1=True):
"""
round robin transfer v to ic.amp and vc.amp1, nai to vc.amp2
"""
ncell = model[1]
for gid, cell in model[0].items():
s = cell.soma
srcsid = gid
tarsid = (gid+1)%ncell
pc.source_var(s(.5)._ref_v, srcsid, sec=s)
pc.source_var(s(.5)._ref_nai, srcsid+ncell, sec=s)
pc.target_var(cell.ic, cell.ic._ref_amp, tarsid)
if amp1:
pc.target_var(cell.vc, cell.vc._ref_amp1, tarsid)
pc.target_var(cell.vc, cell.vc._ref_amp2, tarsid+ncell)
def init_values():
"""
Initialize sources to their sid values and targets to 0
This allows substantive test that source values make it to targets.
"""
ncell = model[1]
for gid, c in model[0].items():
c.soma(.5).v = gid
c.soma(.5).nai = gid+ncell
c.ic.amp = 0
c.vc.amp1 = 0
c.vc.amp2 = 0
def check_values():
"""
Verify that target values are equal to source values.
"""
values = {}
for gid, c in model[0].items():
vi = c.soma(.5).v
if (h.ismembrane("extracellular", sec = c.soma)):
vi += c.soma(.5).vext[0]
values[gid] = {'v':vi, 'nai':c.soma(.5).nai, 'amp':c.ic.amp, 'amp1':c.vc.amp1, 'amp2':c.vc.amp2}
x = pc.py_gather(values, 0)
if rank == 0:
values = {}
for v in x:
values.update(v)
ncell = len(values)
for gid in values:
v1 = values[gid]
v2 = values[(gid+ncell-1)%ncell]
assert(v1['v'] == v2['amp'])
assert(v1['v'] == v2['amp1'])
assert(v1['nai'] == v2['amp2'])
def test_partrans():
# no transfer targets or sources.
mkmodel(4)
run()
# invalid source or target sid.
if 0 in model[0]:
cell = model[0][0]
s = cell.soma
expect_error(pc.source_var, (s(.5)._ref_v, -1), sec=s)
expect_error(pc.target_var, (cell.ic, cell.ic._ref_amp, -1))
# target with no source.
if pc.gid_exists(1):
cell = pc.gid2cell(1)
pc.target_var(cell.ic, cell.ic._ref_amp, 1)
expect_error(run, ())
mkmodel(4)
# source with no target (not an error).
if pc.gid_exists(1):
cell = pc.gid2cell(1)
pc.source_var(cell.soma(.5)._ref_v, 1, sec=cell.soma)
run()
# No point process for target
if pc.gid_exists(1):
cell = pc.gid2cell(1)
pc.target_var(cell.vc._ref_amp3, 1)
run() # ok
pc.nthread(2)
expect_error(run, ()) # Do not know the POINT_PROCESS target
pc.nthread(1)
# Wrong sec for source ref and wrong point process for target ref.
mkmodel(1)
if pc.gid_exists(0):
cell = pc.gid2cell(0)
sec = h.Section(name="dend")
expect_error(pc.source_var, (cell.soma(.5)._ref_v, 1), sec=sec)
expect_error(pc.source_var, (cell.soma(.5)._ref_nai, 2), sec=sec)
del sec
expect_error(pc.target_var,(cell.ic, cell.vc._ref_amp3, 1))
# source sid already in use
expect_error(pc.source_var, (cell.soma(.5)._ref_nai, 1), sec=cell.soma)
# partrans update: could not find parameter index
# pv2node checks the parent
mkmodel(1)
s1 = h.Section(name="dend")
s2 = h.Section(name="soma")
ic = h.IClamp(s1(.5))
pc.source_var(s1(0)._ref_v, rank, sec=s1)
pc.target_var(ic, ic._ref_amp, rank)
run()
assert(s1(0).v == ic.amp)
'''
# but following changes the source node and things get screwed up
# because of continuing to use a freed Node*. The solution is
# beyond the scope of this pull request and would involve replacing
# description in terms of Node* with (Section*, arc_position)
s1.connect(s2(.5))
run()
print(s1(0).v, ic.amp)
assert(s1(0).v == ic.amp)
'''
# non_vsrc_update property disappears from Node*
s1.insert("pas") # not allowed to uninsert ions :(
pc.source_var(s1(.5)._ref_e_pas, rank+10, sec=s1)
pc.target_var(ic, ic._ref_delay, rank+10)
run()
assert(s1(.5).e_pas == ic.delay)
s1.uninsert("pas")
expect_error(run, ())
teardown()
del ic, s1, s2
# missing setup_transfer
mkmodel(4)
transfer1()
expect_error(h.finitialize, (-65,))
# round robin transfer v to ic.amp and vc.amp1, nai to vc.amp2
ncell = 5
mkmodel(ncell)
transfer1()
init_values()
run()
check_values()
# nrnmpi_int_alltoallv_sparse
h.nrn_sparse_partrans = 1
mkmodel(5)
transfer1()
init_values()
run()
check_values()
h.nrn_sparse_partrans = 0
# impedance error (number of gap junction not equal to number of pc.transfer_var)
imp = h.Impedance()
if 0 in model[0]:
imp.loc(model[0][0].soma(.5))
expect_error(imp.compute, (1, 1))
del imp
# For impedance, pc.target_var requires that its first arg be a reference to the POINT_PROCESS"
mkmodel(2)
if pc.gid_exists(0):
cell = pc.gid2cell(0)
pc.source_var(cell.soma(.5)._ref_v, 1000, sec=cell.soma)
cell.hgap[1] = h.HGap(cell.soma(.5))
pc.target_var(cell.hgap[1]._ref_e, 1001)
if pc.gid_exists(1):
cell = pc.gid2cell(1)
pc.source_var(cell.soma(.5)._ref_v, 1001, sec=cell.soma)
cell.hgap[0] = h.HGap(cell.soma(.5))
pc.target_var(cell.hgap[0]._ref_e, 1000)
pc.setup_transfer()
imp = h.Impedance()
h.finitialize(-65)
if pc.gid_exists(0):
imp.loc(pc.gid2cell(0).soma(.5))
expect_error(imp.compute, (10, 1, 100))
del imp, cell
# impedance
ncell = 5
mkmodel(ncell)
mkgaps(list(range(ncell-1)))
pc.setup_transfer()
imp = h.Impedance()
h.finitialize(-65)
if 0 in model[0]:
imp.loc(model[0][0].soma(.5))
niter=imp.compute(10, 1, 100)
if rank == 0:
print("impedance iterations=%d"%niter)
# tickle execution of target_ptr_update for one more line of coverage.
if 0 in model[0]:
model[0][0].hgap[1].loc(model[0][0].soma(0))
model[0][0].hgap[1].loc(model[0][0].soma(.5))
niter=imp.compute(10, 1, 100)
del imp
#CoreNEURON gap file generation
mkmodel(ncell)
transfer1()
    # The following is a bit tricky and needs some explanation in the docs.
# cannot be cache_efficient if general sparse matrix solver in effect.
cvode = h.CVode()
assert(cvode.use_mxb(0) == 0)
assert(cvode.cache_efficient(1) == 1)
pc.setup_transfer()
h.finitialize(-65)
pc.nrncore_write("tmp")
# CoreNEURON: one thread empty of gaps
mkmodel(1)
transfer1()
s = h.Section("dend")
pc.set_gid2node(rank+10, rank)
pc.cell(rank+10, h.NetCon(s(.5)._ref_v, None, sec=s))
pc.nthread(2)
pc.setup_transfer()
h.finitialize(-65)
pc.nrncore_write("tmp")
pc.nthread(1)
teardown()
del s
# There are single thread circumstances where target POINT_PROCESS is needed
s = h.Section("dend")
pc.set_gid2node(rank, rank)
pc.cell(rank, h.NetCon(s(.5)._ref_v, None, sec=s))
pc.source_var(s(.5)._ref_v, rank, sec=s)
ic=h.IClamp(s(.5))
pc.target_var(ic._ref_amp, rank)
pc.setup_transfer()
expect_error(h.finitialize, (-65,))
teardown()
del ic, s
# threads
mkmodel(ncell)
transfer1()
pc.nthread(2)
init_values()
run()
check_values()
pc.nthread(1)
# extracellular means use v = vm+vext[0]
for cell in model[0].values():
cell.soma.insert("extracellular")
init_values()
run()
check_values()
teardown()
if __name__ == "__main__":
test_partrans()
pc.barrier()
h.quit()
|
the-stack_0_15163 | import argparse
import sys
sys.setrecursionlimit(100000)
from interpreter import executeFunctions as executionFunctions
from interpreter import imageFunctions as imageWrapper
from GUI import main as GUIMain
parser = argparse.ArgumentParser(description='Interprets a piet image')
parser.add_argument("-f", "--file", required=True, type=str, help="complete filepath to a .png or .gif image")
parser.add_argument("-v", "--verbose", action="store_true", help="Outputs number of steps to STDOUT")
parser.add_argument("-g", "--graphical", action="store_true", help="Opens GUI with the file loaded")
args = parser.parse_args()
if not args.graphical:
executionFunctions.interpret(imageWrapper.getImage(args.file))
if args.verbose:
print("\nTotal steps: {}".format(executionFunctions.takeStep.counter))
else:
app = GUIMain.GUI()
app.setFileText(args.file)
app.loadFile()
app.run()
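# Example invocation (the script name and image path are assumptions):
#   python main.py -f examples/hello_world.png -v
#   python main.py -f examples/hello_world.png -g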
|
the-stack_0_15167 | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from enum import auto
from typing import Any, Callable, Dict, List
import regex
from bkuser_core.categories.constants import CategoryType
from bkuser_core.categories.models import ProfileCategory
from bkuser_core.common.enum import AutoLowerEnum
from bkuser_core.departments.models import Department
from bkuser_core.profiles.models import DynamicFieldInfo, Profile
from django.utils.translation import ugettext_lazy as _
class IAMCallbackMethods(AutoLowerEnum):
LIST_ATTR = auto()
LIST_ATTR_VALUE = auto()
LIST_INSTANCE = auto()
FETCH_INSTANCE_INFO = auto()
LIST_INSTANCE_BY_POLICY = auto()
SEARCH_INSTANCE = auto()
_choices_labels = (
(LIST_ATTR, "查询某个资源类型可用于配置权限的属性列表"),
(LIST_ATTR_VALUE, "获取一个资源类型某个属性的值列表"),
(LIST_INSTANCE, "根据过滤条件查询实例"),
(FETCH_INSTANCE_INFO, "批量获取资源实例详情"),
(LIST_INSTANCE_BY_POLICY, "根据策略表达式查询资源实例"),
(SEARCH_INSTANCE, "搜索资源实例"),
)
class PrincipalTypeEnum(AutoLowerEnum):
USER = auto()
class IAMAction(AutoLowerEnum):
    # User fields
MANAGE_FIELD = auto()
VIEW_FIELD = auto()
    # Audit
VIEW_AUDIT = auto()
    # Category (user directory) related
CREATE_LOCAL_CATEGORY = auto()
CREATE_LDAP_CATEGORY = auto()
CREATE_MAD_CATEGORY = auto()
CREATE_CUSTOM_CATEGORY = auto()
MANAGE_CATEGORY = auto()
VIEW_CATEGORY = auto()
    # Department
CREATE_ROOT_DEPARTMENT = auto()
MANAGE_DEPARTMENT = auto()
VIEW_DEPARTMENT = auto()
@classmethod
def get_choice_label(cls, action_id: "IAMAction") -> str:
return {
cls.MANAGE_FIELD: "用户字段管理",
cls.VIEW_FIELD: "查看字段",
cls.VIEW_AUDIT: "审计信息查看",
cls.CREATE_LOCAL_CATEGORY: "本地用户目录新建",
cls.CREATE_LDAP_CATEGORY: "LDAP目录新建",
cls.CREATE_MAD_CATEGORY: "MAD目录新建",
cls.CREATE_CUSTOM_CATEGORY: "自定义目录新建",
cls.MANAGE_CATEGORY: "目录管理",
cls.VIEW_CATEGORY: "查看目录",
cls.CREATE_ROOT_DEPARTMENT: "根组织新建",
cls.MANAGE_DEPARTMENT: "组织和成员管理",
cls.VIEW_DEPARTMENT: "组织和成员查看",
}[action_id]
@classmethod
def get_global_actions(cls) -> tuple:
"""不需要和任何资源绑定,只需要判断某人是否有某个操作的权限"""
return (
cls.VIEW_AUDIT,
cls.VIEW_FIELD,
cls.MANAGE_FIELD,
cls.CREATE_MAD_CATEGORY,
cls.CREATE_LDAP_CATEGORY,
cls.CREATE_LOCAL_CATEGORY,
cls.CREATE_CUSTOM_CATEGORY,
)
@classmethod
def get_action_by_category_type(cls, category_type: str) -> "IAMAction":
return { # type: ignore
CategoryType.LOCAL.value: cls.CREATE_LOCAL_CATEGORY,
CategoryType.LDAP.value: cls.CREATE_LDAP_CATEGORY,
CategoryType.MAD.value: cls.CREATE_MAD_CATEGORY,
}[category_type]
@classmethod
def is_global_action(cls, action_id: "IAMAction") -> bool:
for i in cls.get_global_actions():
if action_id == i:
return True
return False
@classmethod
def get_related_resource_types(cls, action_id: "IAMAction") -> list:
return {
cls.MANAGE_CATEGORY: [ResourceType.CATEGORY],
cls.VIEW_CATEGORY: [ResourceType.CATEGORY],
cls.VIEW_DEPARTMENT: [ResourceType.DEPARTMENT],
cls.MANAGE_DEPARTMENT: [ResourceType.DEPARTMENT],
cls.CREATE_ROOT_DEPARTMENT: [ResourceType.CATEGORY],
}[action_id]
class ResourceType(AutoLowerEnum):
FIELD = auto()
CATEGORY = auto()
DEPARTMENT = auto()
PROFILE = auto()
@classmethod
def get_type_name(cls, resource_type: "ResourceType") -> str:
return {
cls.FIELD: _("用户字段"),
cls.CATEGORY: _("用户目录"),
cls.DEPARTMENT: _("组织"),
cls.PROFILE: _("用户"),
}[resource_type]
@classmethod
def get_by_model(cls, instance) -> "ResourceType":
return { # type: ignore
Department: cls.DEPARTMENT,
ProfileCategory: cls.CATEGORY,
DynamicFieldInfo: cls.FIELD,
Profile: cls.PROFILE,
}[type(instance)]
@classmethod
def get_attr_by_model(cls, instance, index: int) -> str:
"""通过 model instance 获取"""
type_ = cls.get_by_model(instance)
id_name_pair = cls.get_id_name_pair(type_)
return getattr(instance, id_name_pair[index])
@classmethod
def get_attributes_mapping(cls, instance) -> dict:
"""获取模型和权限中心属性对应"""
def get_department_path_attribute(obj):
start = f"/category,{obj.category_id}/"
ancestor_ids = obj.get_ancestors(include_self=True).values_list("id", flat=True)
for ancestor_id in ancestor_ids:
start += f"department,{ancestor_id}/"
return {"_bk_iam_path_": start}
_map: Dict[Any, Callable] = {
cls.DEPARTMENT: get_department_path_attribute,
}
try:
return _map[cls.get_by_model(instance)](instance)
except KeyError:
return {}
@classmethod
def get_key_mapping(cls, resource_type: "ResourceType") -> dict:
def parse_department_path(data):
"""解析 department path"""
value = data["value"]
field_map = {"department": "parent_id", "category": "category_id"}
value_pattern = r"^\/((?P<resource_type>\w+),(?P<resource_id>\d+)\/)+"
r = regex.match(value_pattern, value).capturesdict()
r = list(zip(r["resource_type"], r["resource_id"]))
the_last_of_path = r[-1]
            # For a policy on a non-leaf node, return the last id in the path as the resource id
if "node_type" in data and data["node_type"] == "non-leaf":
field_map["department"] = "id"
return field_map[the_last_of_path[0]], int(the_last_of_path[1])
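        # Illustrative example (values are made up): for
        #   {"value": "/category,3/department,5/department,7/"}
        # parse_department_path returns ("parent_id", 7); if the data also carries
        # node_type == "non-leaf", it returns ("id", 7) instead.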
_map: Dict[Any, dict] = {
cls.DEPARTMENT: {
"department.id": "id",
"department._bk_iam_path_": parse_department_path,
},
cls.CATEGORY: {"category.id": "id"},
cls.FIELD: {"field.id": "name"},
cls.PROFILE: {},
}
return _map[resource_type]
@classmethod
def get_id_name_pair(cls, resource_type: "ResourceType") -> tuple:
"""获取 id name 对"""
_map: Dict[Any, tuple] = {
cls.DEPARTMENT: ("id", "name"),
cls.CATEGORY: ("id", "display_name"),
cls.FIELD: ("id", "display_name"),
cls.PROFILE: ("id", "username"),
}
return _map[resource_type]
@classmethod
def get_instance_resource_nodes(cls, instance: Any) -> list:
"""通过数据库实例获取依赖授权路径"""
if not instance:
return []
def get_parent_nodes(i: Department) -> List[dict]:
"""获取父路径的 resource nodes"""
            # The callback request requires the complete resource path
parents = i.get_ancestors(include_self=True)
d_nodes = [{"type": cls.get_by_model(d).value, "id": d.pk, "name": d.name} for d in parents]
category = ProfileCategory.objects.get(id=i.category_id)
return [
{"type": cls.CATEGORY.value, "id": category.id, "name": category.display_name},
*d_nodes,
]
special_map: Dict[Any, Callable] = {
cls.DEPARTMENT: get_parent_nodes,
}
try:
return special_map[cls.get_by_model(instance)](instance)
except KeyError:
return [
{
"type": cls.get_by_model(instance).value,
"id": instance.pk,
"name": getattr(
instance,
cls.get_constants_by_model(instance, "get_id_name_pair")[1],
),
}
]
@classmethod
def get_constants_by_model(cls, instance, target: str) -> Any:
"""通过数据模型实例来获取配置常量
:param instance: 数据模型实例
:param target: 目标方法
"""
return getattr(cls, target)(cls.get_by_model(instance))
|
the-stack_0_15168 | import socket
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# Bind to all interfaces on port 5566
sock.bind(("0.0.0.0",5566))
# Listen with a backlog of 20
sock.listen(20)
# Accept a single incoming connection
conn, addr = sock.accept()
print("address is :",addr)
data = conn.recv(1024)
print(data)
msg = """HTTP/1.1 200 OK
Content-Type:text/html
<meta charset='utf-8'>
<h1>人生苦短,我用python</h1>
"""
# Send the response; conn is already connected, so use send() rather than sendto()
conn.send(msg.encode("utf-8"))
conn.close()
sock.close()
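# To try it while the script is running (port and path as configured above):
#   curl -v http://127.0.0.1:5566/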
|
the-stack_0_15170 | import cv2
import numpy as np
import threading
class Webcam2rgb():
def start(self, callback, cameraNumber=0, width = None, height = None, fps = None, directShow = False):
self.callback = callback
try:
            # Parenthesise the conditional so cameraNumber is kept in the non-DirectShow case
            self.cam = cv2.VideoCapture(cameraNumber + (cv2.CAP_DSHOW if directShow else cv2.CAP_ANY))
            if not self.cam.isOpened():
                print('opening camera')
                self.cam.open(cameraNumber)
if width:
self.cam.set(cv2.CAP_PROP_FRAME_WIDTH,width)
if height:
self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT,height)
if fps:
self.cam.set(cv2.CAP_PROP_FPS, fps)
self.running = True
self.thread = threading.Thread(target = self.calc_BRG)
self.thread.start()
self.ret_val = True
except:
self.running = False
self.ret_val = False
def stop(self):
self.running = False
self.thread.join()
def calc_BRG(self):
while self.running:
try:
self.ret_val = False
self.ret_val, img = self.cam.read()
h, w, c = img.shape
                brg = img[int(h/2),int(w/2)]  # centre pixel, in OpenCV's BGR channel order
self.callback(self.ret_val,brg)
except:
self.running = False
def cameraFs(self):
return self.cam.get(cv2.CAP_PROP_FPS)
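# Illustrative usage (camera index and timings are assumptions):
#
#   import time
#
#   def on_sample(ok, bgr):
#       if ok:
#           print(bgr)
#
#   cam = Webcam2rgb()
#   cam.start(on_sample, cameraNumber=0, fps=30)
#   time.sleep(2)
#   cam.stop()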
|
the-stack_0_15173 | import random
from raiden.constants import ABSENT_SECRET
from raiden.settings import DEFAULT_WAIT_BEFORE_LOCK_REMOVAL
from raiden.transfer import channel, routes
from raiden.transfer.architecture import Event, TransitionResult
from raiden.transfer.events import EventPaymentSentFailed, EventPaymentSentSuccess
from raiden.transfer.identifiers import CANONICAL_IDENTIFIER_GLOBAL_QUEUE
from raiden.transfer.mediated_transfer.events import (
EventRouteFailed,
EventUnlockFailed,
EventUnlockSuccess,
SendLockedTransfer,
SendSecretReveal,
)
from raiden.transfer.mediated_transfer.state import (
InitiatorTransferState,
TransferDescriptionWithSecretState,
)
from raiden.transfer.mediated_transfer.state_change import (
ReceiveSecretRequest,
ReceiveSecretReveal,
)
from raiden.transfer.state import (
ChannelState,
NettingChannelState,
RouteState,
message_identifier_from_prng,
)
from raiden.transfer.state_change import Block, ContractReceiveSecretReveal, StateChange
from raiden.transfer.utils import is_valid_secret_reveal
from raiden.utils.typing import (
MYPY_ANNOTATION,
Address,
BlockExpiration,
BlockNumber,
ChannelID,
Dict,
List,
MessageID,
NodeNetworkStateMap,
Optional,
PaymentWithFeeAmount,
Secret,
SecretHash,
)
def events_for_unlock_lock(
initiator_state: InitiatorTransferState,
channel_state: NettingChannelState,
secret: Secret,
secrethash: SecretHash,
pseudo_random_generator: random.Random,
) -> List[Event]:
""" Unlocks the lock offchain, and emits the events for the successful payment. """
# next hop learned the secret, unlock the token locally and send the
# lock claim message to next hop
transfer_description = initiator_state.transfer_description
message_identifier = message_identifier_from_prng(pseudo_random_generator)
unlock_lock = channel.send_unlock(
channel_state=channel_state,
message_identifier=message_identifier,
payment_identifier=transfer_description.payment_identifier,
secret=secret,
secrethash=secrethash,
)
payment_sent_success = EventPaymentSentSuccess(
token_network_registry_address=channel_state.token_network_registry_address,
token_network_address=channel_state.token_network_address,
identifier=transfer_description.payment_identifier,
amount=transfer_description.amount,
target=transfer_description.target,
secret=secret,
route=initiator_state.route.route,
)
unlock_success = EventUnlockSuccess(
transfer_description.payment_identifier, transfer_description.secrethash
)
return [unlock_lock, payment_sent_success, unlock_success]
def handle_block(
initiator_state: InitiatorTransferState,
state_change: Block,
channel_state: NettingChannelState,
pseudo_random_generator: random.Random,
) -> TransitionResult[Optional[InitiatorTransferState]]:
""" Checks if the lock has expired, and if it has sends a remove expired
lock and emits the failing events.
"""
secrethash = initiator_state.transfer.lock.secrethash
locked_lock = channel_state.our_state.secrethashes_to_lockedlocks.get(secrethash)
if not locked_lock:
if channel_state.partner_state.secrethashes_to_lockedlocks.get(secrethash):
return TransitionResult(initiator_state, list())
else:
# if lock is not in our or our partner's locked locks then the
# task can go
return TransitionResult(None, list())
lock_expiration_threshold = BlockExpiration(
locked_lock.expiration + DEFAULT_WAIT_BEFORE_LOCK_REMOVAL
)
lock_has_expired = channel.is_lock_expired(
end_state=channel_state.our_state,
lock=locked_lock,
block_number=state_change.block_number,
lock_expiration_threshold=lock_expiration_threshold,
)
events: List[Event] = list()
if lock_has_expired and initiator_state.transfer_state != "transfer_expired":
is_channel_open = channel.get_status(channel_state) == ChannelState.STATE_OPENED
if is_channel_open:
expired_lock_events = channel.send_lock_expired(
channel_state=channel_state,
locked_lock=locked_lock,
pseudo_random_generator=pseudo_random_generator,
)
events.extend(expired_lock_events)
if initiator_state.received_secret_request:
reason = "bad secret request message from target"
else:
reason = "lock expired"
transfer_description = initiator_state.transfer_description
payment_identifier = transfer_description.payment_identifier
# TODO: When we introduce multiple transfers per payment this needs to be
# reconsidered. As we would want to try other routes once a route
# has failed, and a transfer failing does not mean the entire payment
# would have to fail.
# Related issue: https://github.com/raiden-network/raiden/issues/2329
payment_failed = EventPaymentSentFailed(
token_network_registry_address=transfer_description.token_network_registry_address,
token_network_address=transfer_description.token_network_address,
identifier=payment_identifier,
target=transfer_description.target,
reason=reason,
)
route_failed = EventRouteFailed(
secrethash=secrethash,
route=initiator_state.route.route,
token_network_address=transfer_description.token_network_address,
)
unlock_failed = EventUnlockFailed(
identifier=payment_identifier,
secrethash=initiator_state.transfer_description.secrethash,
reason=reason,
)
lock_exists = channel.lock_exists_in_either_channel_side(
channel_state=channel_state, secrethash=secrethash
)
initiator_state.transfer_state = "transfer_expired"
return TransitionResult(
# If the lock is either in our state or partner state we keep the
# task around to wait for the LockExpired messages to sync.
# Check https://github.com/raiden-network/raiden/issues/3183
initiator_state if lock_exists else None,
events + [payment_failed, route_failed, unlock_failed],
)
else:
return TransitionResult(initiator_state, events)
def try_new_route(
channelidentifiers_to_channels: Dict[ChannelID, NettingChannelState],
nodeaddresses_to_networkstates: NodeNetworkStateMap,
candidate_route_states: List[RouteState],
transfer_description: TransferDescriptionWithSecretState,
pseudo_random_generator: random.Random,
block_number: BlockNumber,
) -> TransitionResult[Optional[InitiatorTransferState]]:
initiator_state = None
events: List[Event] = list()
amount_with_fee: PaymentWithFeeAmount = PaymentWithFeeAmount(
transfer_description.amount + transfer_description.allocated_fee
)
channel_state = None
route_state = None
reachable_route_states = routes.filter_reachable_routes(
candidate_route_states, nodeaddresses_to_networkstates
)
for reachable_route_state in reachable_route_states:
forward_channel_id = reachable_route_state.forward_channel_id
candidate_channel_state = forward_channel_id and channelidentifiers_to_channels.get(
forward_channel_id
)
assert isinstance(candidate_channel_state, NettingChannelState)
is_channel_usable = channel.is_channel_usable_for_new_transfer(
channel_state=candidate_channel_state,
transfer_amount=amount_with_fee,
lock_timeout=transfer_description.locktimeout,
)
if is_channel_usable:
channel_state = candidate_channel_state
route_state = reachable_route_state
break
if route_state is None:
if not reachable_route_states:
reason = "there is no route available"
else:
reason = "none of the available routes could be used"
transfer_failed = EventPaymentSentFailed(
token_network_registry_address=transfer_description.token_network_registry_address,
token_network_address=transfer_description.token_network_address,
identifier=transfer_description.payment_identifier,
target=transfer_description.target,
reason=reason,
)
events.append(transfer_failed)
initiator_state = None
else:
assert channel_state is not None
message_identifier = message_identifier_from_prng(pseudo_random_generator)
lockedtransfer_event = send_lockedtransfer(
transfer_description=transfer_description,
channel_state=channel_state,
message_identifier=message_identifier,
block_number=block_number,
route_state=route_state,
route_states=reachable_route_states,
)
assert lockedtransfer_event
initiator_state = InitiatorTransferState(
route=route_state,
transfer_description=transfer_description,
channel_identifier=channel_state.identifier,
transfer=lockedtransfer_event.transfer,
)
events.append(lockedtransfer_event)
return TransitionResult(initiator_state, events)
def send_lockedtransfer(
transfer_description: TransferDescriptionWithSecretState,
channel_state: NettingChannelState,
message_identifier: MessageID,
block_number: BlockNumber,
route_state: RouteState,
route_states: List[RouteState],
) -> SendLockedTransfer:
""" Create a mediated transfer using channel. """
assert channel_state.token_network_address == transfer_description.token_network_address
lock_expiration = channel.get_safe_initial_expiration(
block_number, channel_state.reveal_timeout, transfer_description.locktimeout
)
# The payment amount and the fee amount must be included in the locked
# amount, as a guarantee to the mediator that the fee will be claimable
# on-chain.
total_amount = PaymentWithFeeAmount(
transfer_description.amount + transfer_description.allocated_fee
)
lockedtransfer_event = channel.send_lockedtransfer(
channel_state=channel_state,
initiator=transfer_description.initiator,
target=transfer_description.target,
amount=total_amount,
message_identifier=message_identifier,
payment_identifier=transfer_description.payment_identifier,
expiration=lock_expiration,
secrethash=transfer_description.secrethash,
route_states=routes.prune_route_table(
route_states=route_states, selected_route=route_state
),
)
return lockedtransfer_event
def handle_secretrequest(
initiator_state: InitiatorTransferState,
state_change: ReceiveSecretRequest,
channel_state: NettingChannelState,
pseudo_random_generator: random.Random,
) -> TransitionResult[InitiatorTransferState]:
is_message_from_target = (
state_change.sender == initiator_state.transfer_description.target
and state_change.secrethash == initiator_state.transfer_description.secrethash
and state_change.payment_identifier
== initiator_state.transfer_description.payment_identifier
)
lock = channel.get_lock(
channel_state.our_state, initiator_state.transfer_description.secrethash
)
# This should not ever happen. This task clears itself when the lock is
# removed.
assert lock is not None, "channel is does not have the transfer's lock"
already_received_secret_request = initiator_state.received_secret_request
# lock.amount includes the fees, transfer_description.amount is the actual
# payment amount, for the transfer to be valid and the unlock allowed the
# target must receive an amount between these values.
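    # For example, with amount == 10 and allocated_fee == 2 the lock was created for
    # 12, so any requested amount in the closed interval [10, 12] passes this check.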
is_valid_secretrequest = (
state_change.amount <= lock.amount
and state_change.amount >= initiator_state.transfer_description.amount
and state_change.expiration == lock.expiration
and initiator_state.transfer_description.secret != ABSENT_SECRET
)
if already_received_secret_request and is_message_from_target:
# A secret request was received earlier, all subsequent are ignored
# as it might be an attack
iteration = TransitionResult(initiator_state, list())
elif is_valid_secretrequest and is_message_from_target:
# Reveal the secret to the target node and wait for its confirmation.
# At this point the transfer is not cancellable anymore as either the lock
# timeouts or a secret reveal is received.
#
# Note: The target might be the first hop
#
message_identifier = message_identifier_from_prng(pseudo_random_generator)
transfer_description = initiator_state.transfer_description
recipient = transfer_description.target
revealsecret = SendSecretReveal(
recipient=Address(recipient),
message_identifier=message_identifier,
secret=transfer_description.secret,
canonical_identifier=CANONICAL_IDENTIFIER_GLOBAL_QUEUE,
)
initiator_state.transfer_state = "transfer_secret_revealed"
initiator_state.received_secret_request = True
iteration = TransitionResult(initiator_state, [revealsecret])
elif not is_valid_secretrequest and is_message_from_target:
initiator_state.received_secret_request = True
iteration = TransitionResult(initiator_state, list())
else:
iteration = TransitionResult(initiator_state, list())
return iteration
def handle_offchain_secretreveal(
initiator_state: InitiatorTransferState,
state_change: ReceiveSecretReveal,
channel_state: NettingChannelState,
pseudo_random_generator: random.Random,
) -> TransitionResult[Optional[InitiatorTransferState]]:
""" Once the next hop proves it knows the secret, the initiator can unlock
the mediated transfer.
This will validate the secret, and if valid a new balance proof is sent to
the next hop with the current lock removed from the pending locks and the
transferred amount updated.
"""
iteration: TransitionResult[Optional[InitiatorTransferState]]
valid_reveal = is_valid_secret_reveal(
state_change=state_change,
transfer_secrethash=initiator_state.transfer_description.secrethash,
)
sent_by_partner = state_change.sender == channel_state.partner_state.address
is_channel_open = channel.get_status(channel_state) == ChannelState.STATE_OPENED
if valid_reveal and is_channel_open and sent_by_partner:
events = events_for_unlock_lock(
initiator_state=initiator_state,
channel_state=channel_state,
secret=state_change.secret,
secrethash=state_change.secrethash,
pseudo_random_generator=pseudo_random_generator,
)
iteration = TransitionResult(None, events)
else:
events = list()
iteration = TransitionResult(initiator_state, events)
return iteration
def handle_onchain_secretreveal(
initiator_state: InitiatorTransferState,
state_change: ContractReceiveSecretReveal,
channel_state: NettingChannelState,
pseudo_random_generator: random.Random,
) -> TransitionResult[Optional[InitiatorTransferState]]:
""" When a secret is revealed on-chain all nodes learn the secret.
This check the on-chain secret corresponds to the one used by the
initiator, and if valid a new balance proof is sent to the next hop with
the current lock removed from the pending locks and the transferred amount
updated.
"""
iteration: TransitionResult[Optional[InitiatorTransferState]]
secret = state_change.secret
secrethash = initiator_state.transfer_description.secrethash
is_valid_secret = is_valid_secret_reveal(
state_change=state_change, transfer_secrethash=secrethash
)
is_channel_open = channel.get_status(channel_state) == ChannelState.STATE_OPENED
is_lock_expired = state_change.block_number > initiator_state.transfer.lock.expiration
is_lock_unlocked = is_valid_secret and not is_lock_expired
if is_lock_unlocked:
channel.register_onchain_secret(
channel_state=channel_state,
secret=secret,
secrethash=secrethash,
secret_reveal_block_number=state_change.block_number,
)
if is_lock_unlocked and is_channel_open:
events = events_for_unlock_lock(
initiator_state,
channel_state,
state_change.secret,
state_change.secrethash,
pseudo_random_generator,
)
iteration = TransitionResult(None, events)
else:
events = list()
iteration = TransitionResult(initiator_state, events)
return iteration
def state_transition(
initiator_state: InitiatorTransferState,
state_change: StateChange,
channel_state: NettingChannelState,
pseudo_random_generator: random.Random,
) -> TransitionResult[Optional[InitiatorTransferState]]:
if type(state_change) == Block:
assert isinstance(state_change, Block), MYPY_ANNOTATION
iteration = handle_block(
initiator_state, state_change, channel_state, pseudo_random_generator
)
elif type(state_change) == ReceiveSecretRequest:
assert isinstance(state_change, ReceiveSecretRequest), MYPY_ANNOTATION
iteration = handle_secretrequest(
initiator_state, state_change, channel_state, pseudo_random_generator
)
elif type(state_change) == ReceiveSecretReveal:
assert isinstance(state_change, ReceiveSecretReveal), MYPY_ANNOTATION
iteration = handle_offchain_secretreveal(
initiator_state, state_change, channel_state, pseudo_random_generator
)
elif type(state_change) == ContractReceiveSecretReveal:
assert isinstance(state_change, ContractReceiveSecretReveal), MYPY_ANNOTATION
iteration = handle_onchain_secretreveal(
initiator_state, state_change, channel_state, pseudo_random_generator
)
else:
iteration = TransitionResult(initiator_state, list())
return iteration
|