ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | 1a52024c22a19429383f627a808d5c8c6e94e75b | # qubit number=4
# total number=41
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    length = len(s)
    res = []
    for i in range(length):
        res.append(str(int(s[i]) ^ int(t[i])))
    return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
    length = len(s)
    res = 0
    for i in range(length):
        res += int(s[i]) * int(t[i])
    return str(res % 2)
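# Quick illustration of the two helpers above (values worked out by hand from the
# code, not taken from the original script). Note that bitwise_xor returns the
# per-position XOR in reversed order:
#   bitwise_xor("110", "000")  ->  "011"
#   bitwise_dot("111", "101")  ->  "0"    # (1*1 + 1*0 + 1*1) % 2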
def build_oracle(n: int, f) -> QuantumCircuit:
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
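# Minimal usage sketch of build_oracle (my own illustration, not part of the
# original file): a 2-qubit oracle that flips the target only for the input "11".
#   example_oracle = build_oracle(2, lambda rep: "1" if rep == "11" else "0")
#   print(example_oracle.draw())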
def make_circuit(n: int, f) -> QuantumCircuit:
    # circuit begin
    input_qubit = QuantumRegister(n, "qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.cx(input_qubit[0], input_qubit[3])  # number=13
    prog.h(input_qubit[3])  # number=23
    prog.cz(input_qubit[0], input_qubit[3])  # number=24
    prog.y(input_qubit[1])  # number=37
    prog.h(input_qubit[3])  # number=25
    prog.x(input_qubit[3])  # number=18
    prog.cx(input_qubit[0], input_qubit[3])  # number=19
    prog.cx(input_qubit[0], input_qubit[3])  # number=15
    prog.h(input_qubit[1])  # number=2
    prog.h(input_qubit[2])  # number=3
    prog.h(input_qubit[3])  # number=4
    prog.y(input_qubit[3])  # number=12
    prog.h(input_qubit[0])  # number=5
    oracle = build_oracle(n - 1, f)
    prog.append(oracle.to_gate(), [input_qubit[i] for i in range(n - 1)] + [input_qubit[n - 1]])
    prog.h(input_qubit[1])  # number=6
    prog.h(input_qubit[2])  # number=7
    prog.h(input_qubit[3])  # number=32
    prog.cx(input_qubit[3], input_qubit[0])  # number=20
    prog.h(input_qubit[0])  # number=38
    prog.cz(input_qubit[3], input_qubit[0])  # number=39
    prog.h(input_qubit[0])  # number=40
    prog.z(input_qubit[3])  # number=27
    prog.h(input_qubit[0])  # number=29
    prog.cz(input_qubit[3], input_qubit[0])  # number=30
    prog.h(input_qubit[0])  # number=31
    prog.h(input_qubit[0])  # number=33
    prog.cz(input_qubit[3], input_qubit[0])  # number=34
    prog.h(input_qubit[0])  # number=35
    prog.h(input_qubit[2])  # number=36
    prog.h(input_qubit[3])  # number=8
    prog.h(input_qubit[0])  # number=9
    prog.y(input_qubit[2])  # number=10
    prog.y(input_qubit[2])  # number=11
    # circuit end
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4, f)
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot = 8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog, backend, optimization_level=2)
    writefile = open("../data/startQiskit_QC2805.csv", "w")
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
|
py | 1a5204415090e54bf0f1e47c1d47d33353d0e2eb | import os
import platform
import sys
from methods import get_compiler_version, using_gcc, using_clang
def is_active():
return True
def get_name():
return "X11"
def can_build():
if os.name != "posix" or sys.platform == "darwin":
return False
# Check the minimal dependencies
x11_error = os.system("pkg-config --version > /dev/null")
if x11_error:
return False
x11_error = os.system("pkg-config x11 --modversion > /dev/null ")
if x11_error:
return False
x11_error = os.system("pkg-config xcursor --modversion > /dev/null ")
if x11_error:
print("xcursor not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xinerama --modversion > /dev/null ")
if x11_error:
print("xinerama not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xext --modversion > /dev/null ")
if x11_error:
print("xext not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xrandr --modversion > /dev/null ")
if x11_error:
print("xrandr not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xrender --modversion > /dev/null ")
if x11_error:
print("xrender not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xi --modversion > /dev/null ")
if x11_error:
print("xi not found.. Aborting.")
return False
return True
def get_opts():
from SCons.Variables import BoolVariable, EnumVariable
return [
BoolVariable("use_llvm", "Use the LLVM compiler", False),
BoolVariable("use_lld", "Use the LLD linker", False),
BoolVariable("use_thinlto", "Use ThinLTO", False),
BoolVariable("use_static_cpp", "Link libgcc and libstdc++ statically for better portability", True),
BoolVariable("use_ubsan", "Use LLVM/GCC compiler undefined behavior sanitizer (UBSAN)", False),
BoolVariable("use_asan", "Use LLVM/GCC compiler address sanitizer (ASAN))", False),
BoolVariable("use_lsan", "Use LLVM/GCC compiler leak sanitizer (LSAN))", False),
BoolVariable("use_tsan", "Use LLVM/GCC compiler thread sanitizer (TSAN))", False),
BoolVariable("use_msan", "Use LLVM/GCC compiler memory sanitizer (MSAN))", False),
BoolVariable("pulseaudio", "Detect and use PulseAudio", True),
BoolVariable("udev", "Use udev for gamepad connection callbacks", True),
BoolVariable("debug_symbols", "Add debugging symbols to release/release_debug builds", True),
BoolVariable("separate_debug_symbols", "Create a separate file containing debugging symbols", False),
BoolVariable("touch", "Enable touch events", True),
BoolVariable("execinfo", "Use libexecinfo on systems where glibc is not available", False),
]
def get_flags():
return []
def configure(env):
## Build type
if env["target"] == "release":
if env["optimize"] == "speed": # optimize for speed (default)
env.Prepend(CCFLAGS=["-O3"])
elif env["optimize"] == "size": # optimize for size
env.Prepend(CCFLAGS=["-Os"])
if env["debug_symbols"]:
env.Prepend(CCFLAGS=["-g2"])
elif env["target"] == "release_debug":
if env["optimize"] == "speed": # optimize for speed (default)
env.Prepend(CCFLAGS=["-O2"])
elif env["optimize"] == "size": # optimize for size
env.Prepend(CCFLAGS=["-Os"])
env.Prepend(CPPDEFINES=["DEBUG_ENABLED"])
if env["debug_symbols"]:
env.Prepend(CCFLAGS=["-g2"])
elif env["target"] == "debug":
env.Prepend(CCFLAGS=["-ggdb"])
env.Prepend(CCFLAGS=["-g3"])
env.Prepend(CPPDEFINES=["DEBUG_ENABLED"])
env.Append(LINKFLAGS=["-rdynamic"])
## Architecture
is64 = sys.maxsize > 2 ** 32
if env["bits"] == "default":
env["bits"] = "64" if is64 else "32"
## Compiler configuration
if "CXX" in env and "clang" in os.path.basename(env["CXX"]):
# Convenience check to enforce the use_llvm overrides when CXX is clang(++)
env["use_llvm"] = True
if env["use_llvm"]:
if "clang++" not in os.path.basename(env["CXX"]):
env["CC"] = "clang"
env["CXX"] = "clang++"
env.extra_suffix = ".llvm" + env.extra_suffix
if env["use_lld"]:
if env["use_llvm"]:
env.Append(LINKFLAGS=["-fuse-ld=lld"])
if env["use_thinlto"]:
# A convenience so you don't need to write use_lto too when using SCons
env["use_lto"] = True
else:
print("Using LLD with GCC is not supported yet, try compiling with 'use_llvm=yes'.")
sys.exit(255)
if env["use_ubsan"] or env["use_asan"] or env["use_lsan"] or env["use_tsan"] or env["use_msan"]:
env.extra_suffix += "s"
if env["use_ubsan"]:
env.Append(
CCFLAGS=[
"-fsanitize=undefined,shift,shift-exponent,integer-divide-by-zero,unreachable,vla-bound,null,return,signed-integer-overflow,bounds,float-divide-by-zero,float-cast-overflow,nonnull-attribute,returns-nonnull-attribute,bool,enum,vptr,pointer-overflow,builtin"
]
)
if env["use_llvm"]:
env.Append(
CCFLAGS=[
"-fsanitize=nullability-return,nullability-arg,function,nullability-assign,implicit-integer-sign-change,implicit-signed-integer-truncation,implicit-unsigned-integer-truncation"
]
)
else:
env.Append(CCFLAGS=["-fsanitize=bounds-strict"])
env.Append(LINKFLAGS=["-fsanitize=undefined"])
if env["use_asan"]:
env.Append(CCFLAGS=["-fsanitize=address,pointer-subtract,pointer-compare"])
env.Append(LINKFLAGS=["-fsanitize=address"])
if env["use_lsan"]:
env.Append(CCFLAGS=["-fsanitize=leak"])
env.Append(LINKFLAGS=["-fsanitize=leak"])
if env["use_tsan"]:
env.Append(CCFLAGS=["-fsanitize=thread"])
env.Append(LINKFLAGS=["-fsanitize=thread"])
if env["use_msan"]:
env.Append(CCFLAGS=["-fsanitize=memory"])
env.Append(LINKFLAGS=["-fsanitize=memory"])
if env["use_lto"]:
if not env["use_llvm"] and env.GetOption("num_jobs") > 1:
env.Append(CCFLAGS=["-flto"])
env.Append(LINKFLAGS=["-flto=" + str(env.GetOption("num_jobs"))])
else:
if env["use_lld"] and env["use_thinlto"]:
env.Append(CCFLAGS=["-flto=thin"])
env.Append(LINKFLAGS=["-flto=thin"])
else:
env.Append(CCFLAGS=["-flto"])
env.Append(LINKFLAGS=["-flto"])
if not env["use_llvm"]:
env["RANLIB"] = "gcc-ranlib"
env["AR"] = "gcc-ar"
env.Append(CCFLAGS=["-pipe"])
env.Append(LINKFLAGS=["-pipe"])
# Check for gcc version >= 6 before adding -no-pie
version = get_compiler_version(env) or [-1, -1]
if using_gcc(env):
if version[0] >= 6:
env.Append(CCFLAGS=["-fpie"])
env.Append(LINKFLAGS=["-no-pie"])
# Do the same for Clang; should be fine with Clang 4 and higher
if using_clang(env):
if version[0] >= 4:
env.Append(CCFLAGS=["-fpie"])
env.Append(LINKFLAGS=["-no-pie"])
## Dependencies
env.ParseConfig("pkg-config x11 --cflags --libs")
env.ParseConfig("pkg-config xcursor --cflags --libs")
env.ParseConfig("pkg-config xinerama --cflags --libs")
env.ParseConfig("pkg-config xext --cflags --libs")
env.ParseConfig("pkg-config xrandr --cflags --libs")
env.ParseConfig("pkg-config xrender --cflags --libs")
env.ParseConfig("pkg-config xi --cflags --libs")
if env["touch"]:
env.Append(CPPDEFINES=["TOUCH_ENABLED"])
# FIXME: Check for existence of the libs before parsing their flags with pkg-config
# freetype depends on libpng and zlib, so bundling one of them while keeping others
# as shared libraries leads to weird issues
if env["builtin_freetype"] or env["builtin_libpng"] or env["builtin_zlib"]:
env["builtin_freetype"] = True
env["builtin_libpng"] = True
env["builtin_zlib"] = True
if not env["builtin_freetype"]:
env.ParseConfig("pkg-config freetype2 --cflags --libs")
if not env["builtin_libpng"]:
env.ParseConfig("pkg-config libpng16 --cflags --libs")
if not env["builtin_bullet"]:
# We need at least version 2.89
import subprocess
bullet_version = subprocess.check_output(["pkg-config", "bullet", "--modversion"]).strip().decode("utf-8")
if bullet_version < "2.89":
# Abort as system bullet was requested but too old
print(
"Bullet: System version {0} does not match minimal requirements ({1}). Aborting.".format(
bullet_version, "2.89"
)
)
sys.exit(255)
env.ParseConfig("pkg-config bullet --cflags --libs")
if False: # not env['builtin_assimp']:
# FIXME: Add min version check
env.ParseConfig("pkg-config assimp --cflags --libs")
if not env["builtin_enet"]:
env.ParseConfig("pkg-config libenet --cflags --libs")
if not env["builtin_squish"]:
env.ParseConfig("pkg-config libsquish --cflags --libs")
if not env["builtin_zstd"]:
env.ParseConfig("pkg-config libzstd --cflags --libs")
# Sound and video libraries
# Keep the order as it triggers chained dependencies (ogg needed by others, etc.)
if not env["builtin_libtheora"]:
env["builtin_libogg"] = False # Needed to link against system libtheora
env["builtin_libvorbis"] = False # Needed to link against system libtheora
env.ParseConfig("pkg-config theora theoradec --cflags --libs")
else:
list_of_x86 = ["x86_64", "x86", "i386", "i586"]
if any(platform.machine() in s for s in list_of_x86):
env["x86_libtheora_opt_gcc"] = True
if not env["builtin_libvpx"]:
env.ParseConfig("pkg-config vpx --cflags --libs")
if not env["builtin_libvorbis"]:
env["builtin_libogg"] = False # Needed to link against system libvorbis
env.ParseConfig("pkg-config vorbis vorbisfile --cflags --libs")
if not env["builtin_opus"]:
env["builtin_libogg"] = False # Needed to link against system opus
env.ParseConfig("pkg-config opus opusfile --cflags --libs")
if not env["builtin_libogg"]:
env.ParseConfig("pkg-config ogg --cflags --libs")
if not env["builtin_libwebp"]:
env.ParseConfig("pkg-config libwebp --cflags --libs")
if not env["builtin_mbedtls"]:
# mbedTLS does not provide a pkgconfig config yet. See https://github.com/ARMmbed/mbedtls/issues/228
env.Append(LIBS=["mbedtls", "mbedcrypto", "mbedx509"])
if not env["builtin_wslay"]:
env.ParseConfig("pkg-config libwslay --cflags --libs")
if not env["builtin_miniupnpc"]:
# No pkgconfig file so far, hardcode default paths.
env.Prepend(CPPPATH=["/usr/include/miniupnpc"])
env.Append(LIBS=["miniupnpc"])
# On Linux wchar_t should be 32-bits
# 16-bit library shouldn't be required due to compiler optimisations
if not env["builtin_pcre2"]:
env.ParseConfig("pkg-config libpcre2-32 --cflags --libs")
# Embree is only compatible with x86_64. Yet another unreliable hack that will break
# cross-compilation, this will really need to be handled better. Thankfully only affects
# people who disable builtin_embree (likely distro packagers).
if env["tools"] and not env["builtin_embree"] and (is64 and platform.machine() == "x86_64"):
# No pkgconfig file so far, hardcode expected lib name.
env.Append(LIBS=["embree3"])
## Flags
if os.system("pkg-config --exists alsa") == 0: # 0 means found
print("Enabling ALSA")
env["alsa"] = True
env.Append(CPPDEFINES=["ALSA_ENABLED", "ALSAMIDI_ENABLED"])
else:
print("ALSA libraries not found, disabling driver")
if env["pulseaudio"]:
if os.system("pkg-config --exists libpulse") == 0: # 0 means found
print("Enabling PulseAudio")
env.Append(CPPDEFINES=["PULSEAUDIO_ENABLED"])
env.ParseConfig("pkg-config --cflags libpulse")
else:
print("PulseAudio development libraries not found, disabling driver")
if platform.system() == "Linux":
env.Append(CPPDEFINES=["JOYDEV_ENABLED"])
if env["udev"]:
if os.system("pkg-config --exists libudev") == 0: # 0 means found
print("Enabling udev support")
env.Append(CPPDEFINES=["UDEV_ENABLED"])
else:
print("libudev development libraries not found, disabling udev support")
else:
env["udev"] = False # Linux specific
# Linkflags below this line should typically stay the last ones
if not env["builtin_zlib"]:
env.ParseConfig("pkg-config zlib --cflags --libs")
env.Prepend(CPPPATH=["#platform/x11"])
env.Append(CPPDEFINES=["X11_ENABLED", "UNIX_ENABLED", "OPENGL_ENABLED", "GLES_ENABLED"])
env.Append(LIBS=["GL", "pthread"])
if platform.system() == "Linux":
env.Append(LIBS=["dl"])
if platform.system().find("BSD") >= 0:
env["execinfo"] = True
if env["execinfo"]:
env.Append(LIBS=["execinfo"])
if not env["tools"]:
import subprocess
import re
linker_version_str = subprocess.check_output([env.subst(env["LINK"]), "-Wl,--version"]).decode("utf-8")
gnu_ld_version = re.search("^GNU ld [^$]*(\d+\.\d+)$", linker_version_str, re.MULTILINE)
if not gnu_ld_version:
print(
"Warning: Creating template binaries enabled for PCK embedding is currently only supported with GNU ld"
)
else:
if float(gnu_ld_version.group(1)) >= 2.30:
env.Append(LINKFLAGS=["-T", "platform/x11/pck_embed.ld"])
else:
env.Append(LINKFLAGS=["-T", "platform/x11/pck_embed.legacy.ld"])
## Cross-compilation
if is64 and env["bits"] == "32":
env.Append(CCFLAGS=["-m32"])
env.Append(LINKFLAGS=["-m32", "-L/usr/lib/i386-linux-gnu"])
elif not is64 and env["bits"] == "64":
env.Append(CCFLAGS=["-m64"])
env.Append(LINKFLAGS=["-m64", "-L/usr/lib/i686-linux-gnu"])
# Link those statically for portability
if env["use_static_cpp"]:
# Workaround for GH-31743, Ubuntu 18.04 i386 crashes when it's used.
# That doesn't make any sense but it's likely a Ubuntu bug?
if is64 or env["bits"] == "64":
env.Append(LINKFLAGS=["-static-libgcc", "-static-libstdc++"])
if env["use_llvm"]:
env["LINKCOM"] = env["LINKCOM"] + " -l:libatomic.a"
else:
if env["use_llvm"]:
env.Append(LIBS=["atomic"])
|
py | 1a5204c4971d673f3207eb5bd15b9e0ab27aa4db | class News:
    '''
    News class to define News Objects
    '''
    def __init__(self, name, description, url, urlToImage, content):
        self.name = name
        self.description = description
        self.url = url
        self.urlToImage = urlToImage
        self.content = content
class Sources:
    '''
    Sources class to define Sources objects
    '''
    def __init__(self, id, name, description, url, category):
        self.id = id
        self.name = name
        self.description = description
        self.url = url
        self.category = category
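# Hedged usage sketch (illustrative values only, not from the original project):
#   article = News("BBC News", "Short description", "https://example.com/story",
#                  "https://example.com/image.png", "Full article content...")
#   source = Sources("bbc-news", "BBC News", "UK public broadcaster",
#                    "https://www.bbc.co.uk", "general")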
|
py | 1a5206f9f006fae241a0a2f80ae50c3b32516a99 | """
Created on Sat Nov 30 11:12:39 2019
@author: Bogdan
"""
import os
import sys
import numpy as np
from scipy.signal import convolve2d
project_path = os.getcwd()
while os.path.basename(project_path) != 'image-tinkering':
    project_path = os.path.dirname(project_path)
sys.path.append(project_path)
from backend import utils
def apply_kernel(image, kernel):
    """ Performs convolution between the given image and kernel """
    if utils.is_color(image):
        result_b = convolve2d(image[:, :, 0], kernel, mode='same', fillvalue=np.median(image[:, :, 0]))
        result_g = convolve2d(image[:, :, 1], kernel, mode='same', fillvalue=np.median(image[:, :, 1]))
        result_r = convolve2d(image[:, :, 2], kernel, mode='same', fillvalue=np.median(image[:, :, 2]))
        channels_list = []
        # Trim values lower than 0 or higher than 255 and convert to uint8 for openCV compatibility
        for channel in 'bgr':
            underflow_mask = locals()['result_' + channel] < 0
            result_temp = np.where(underflow_mask, 0, locals()['result_' + channel])
            result_temp = np.where(result_temp > 255, 255, result_temp)
            result_temp = result_temp.astype(np.uint8)
            channels_list.append(result_temp)
        filtered_image = utils.merge_channels(channels_list)
    else:
        # Trim values lower than 0 or higher than 255 and convert to uint8 for openCV compatibility
        filtered_image = convolve2d(image, kernel, mode='same')
        filtered_image = np.where(filtered_image < 0, 0, filtered_image)
        filtered_image = np.where(filtered_image > 255, 255, filtered_image)
        filtered_image = filtered_image.astype(np.uint8)
    return filtered_image
def generate_box_kernel(size):
    """ Generates a kernel having the given size and giving equal weights to all
    elements surrounding the current pixel """
    return (1 / size ** 2) * np.ones((size, size), dtype=np.uint8)
def generate_gaussian_kernel(size, sigma=3):
    """ Generates a one-sum kernel having the given size, containing samples
    from a gaussian distribution having the given standard deviation """
    size = size // 2
    x, y = np.mgrid[-size: size + 1, -size: size + 1]
    normalization_factor = 1 / (2.0 * np.pi * sigma**2)
    g = np.exp(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2))) * normalization_factor
    g = g / (np.sum(g))  # Normalize the kernel so the sum of elements is 1
    return g
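# Sanity-check sketch (my own, not in the original module): the kernel is
# normalized by construction, so np.isclose(generate_gaussian_kernel(5, sigma=1).sum(), 1.0)
# should hold.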
def get_thresholds(image, method, kernel_size):
    """ Performs convolution between the image and the kernel of the specified
    size. The resulting values are the thresholds used in binarization """
    if method == 'mean':
        kernel = generate_box_kernel(kernel_size)
    else:
        kernel = generate_gaussian_kernel(kernel_size)
    thresholds = apply_kernel(image, kernel)
    return thresholds
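# Hedged sketch of how these thresholds might feed an adaptive binarization step
# (the binarization itself is not part of this module; 'gray' is assumed to be a
# grayscale uint8 numpy array):
#   thresholds = get_thresholds(gray, 'gaussian', 15)
#   binary = np.where(gray >= thresholds, 255, 0).astype(np.uint8)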
def generate_emboss_kernels(size, direction, kernel_type):
    """ Generates the kernels of the specified type (mask or filter), size and
    direction, needed for the embossing operation """
    kernel1 = np.zeros((size, size), dtype=np.int8)
    if direction == 'horizontal':
        kernel1[: size // 2, size // 2] = 1
        kernel1[size // 2 + 1:, size // 2] = -1
        if kernel_type == 'filter':
            kernel1[size // 2, size // 2] = 1
        kernel2 = np.flipud(kernel1)
    elif direction == 'vertical':
        kernel1[size // 2, : size // 2] = 1
        kernel1[size // 2, size // 2 + 1:] = -1
        if kernel_type == 'filter':
            kernel1[size // 2, size // 2] = 1
        kernel2 = np.fliplr(kernel1)
    else:
        for i in range(size):
            if i < size // 2:
                kernel1[i, i] = 1
            elif i > size // 2:
                kernel1[i, i] = -1
            elif kernel_type == 'filter':
                kernel1[i, i] = 1
        kernel2 = np.flipud(kernel1)
    return kernel1, kernel2
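# Worked example (derived by hand from the code above, for orientation only):
#   generate_emboss_kernels(3, 'horizontal', 'mask') returns
#     kernel1 = [[0,  1, 0], [0, 0, 0], [0, -1, 0]]
#     kernel2 = [[0, -1, 0], [0, 0, 0], [0,  1, 0]]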
|
py | 1a520702279ea5ca3428f2cc590d135576ee8931 | from kivy.base import runTouchApp
from kivy.lang import Builder
from kivy.garden.knob import Knob
# LOAD KV UIX
runTouchApp(Builder.load_file('example.kv'))
|
py | 1a520740a6df029fee159abb8f47f0d56462a87e | """
fs.expose.dokan
===============
Expose an FS object to the native filesystem via Dokan.
This module provides the necessary interfaces to mount an FS object into
the local filesystem using Dokan on win32::
http://dokan-dev.github.io/
For simple usage, the function 'mount' takes an FS object
and new device mount point or an existing empty folder
and exposes the given FS as that path::
>>> from fs.memoryfs import MemoryFS
>>> from fs.expose import dokan
>>> fs = MemoryFS()
>>> # Mount device mount point
>>> mp = dokan.mount(fs, "Q:\\")
>>> mp.path
'Q:\\'
>>> mp.unmount()
>>> fs = MemoryFS()
>>> # Mount in an existing empty folder.
>>> mp = dokan.mount(fs, "C:\\test")
>>> mp.path
'C:\\test'
>>> mp.unmount()
The above spawns a new background process to manage the Dokan event loop, which
can be controlled through the returned subprocess.Popen object. To avoid
spawning a new process, set the 'foreground' option::
>>> # This will block until the filesystem is unmounted
>>> dokan.mount(fs, "Q:\\", foreground=True)
Any additional options for the Dokan process can be passed as keyword arguments
to the 'mount' function.
If you require finer control over the creation of the Dokan process, you can
instantiate the MountProcess class directly. It accepts all options available
to subprocess.Popen::
>>> from subprocess import PIPE
>>> mp = dokan.MountProcess(fs, "Q:\\", stderr=PIPE)
>>> dokan_errors = mp.communicate()[1]
If you are exposing an untrusted filesystem, you may like to apply the
wrapper class Win32SafetyFS before passing it into dokan. This will take
a number of steps to avoid suspicious operations on windows, such as
hiding autorun files.
The binding to Dokan is created via ctypes. Due to the very stable ABI of
win32, this should work without further configuration on just about all
systems with Dokan installed.
"""
# Copyright (c) 2009-2010, Cloud Matrix Pty. Ltd.
# Copyright (c) 2016-2016, Adrien J. <[email protected]>.
# All rights reserved; available under the terms of the MIT License.
from __future__ import with_statement, absolute_import
import six
import sys
import os
import errno
import time
import stat as statinfo
import subprocess
try:
import cPickle as pickle
except ImportError:
import pickle
import datetime
import ctypes
from collections import deque
from six.moves import range
from fs.base import threading
from fs.errors import *
from fs.path import *
from fs.local_functools import wraps
from fs.wrapfs import WrapFS
try:
from . import libdokan
except (NotImplementedError, EnvironmentError, ImportError, NameError,):
is_available = False
sys.modules.pop("fs.expose.dokan.libdokan", None)
libdokan = None
else:
is_available = True
from ctypes.wintypes import LPCWSTR, WCHAR
kernel32 = ctypes.windll.kernel32
import logging
logger = logging.getLogger("fs.expose.dokan")
# Options controlling the behavior of the Dokan filesystem
# Output debug messages
DOKAN_OPTION_DEBUG = 1
# Output debug messages to stderr
DOKAN_OPTION_STDERR = 2
# Use alternate stream
DOKAN_OPTION_ALT_STREAM = 4
# Mount drive as write-protected.
DOKAN_OPTION_WRITE_PROTECT = 8
# Use network drive, you need to install Dokan network provider.
DOKAN_OPTION_NETWORK = 16
# Use removable drive
DOKAN_OPTION_REMOVABLE = 32
# Use mount manager
DOKAN_OPTION_MOUNT_MANAGER = 64
# Mount the drive on current session only
DOKAN_OPTION_CURRENT_SESSION = 128
# FileLock in User Mode
DOKAN_OPTION_FILELOCK_USER_MODE = 256
# Error codes returned by DokanMain
DOKAN_SUCCESS = 0
# General Error
DOKAN_ERROR = -1
# Bad Drive letter
DOKAN_DRIVE_LETTER_ERROR = -2
# Can't install driver
DOKAN_DRIVER_INSTALL_ERROR = -3
# Driver something wrong
DOKAN_START_ERROR = -4
# Can't assign a drive letter or mount point
DOKAN_MOUNT_ERROR = -5
# Mount point is invalid
DOKAN_MOUNT_POINT_ERROR = -6
# Requested an incompatible version
DOKAN_VERSION_ERROR = -7
# Misc windows constants
FILE_LIST_DIRECTORY = 0x01
FILE_SHARE_READ = 0x01
FILE_SHARE_WRITE = 0x02
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
FILE_FLAG_OVERLAPPED = 0x40000000
FILE_ATTRIBUTE_ARCHIVE = 32
FILE_ATTRIBUTE_COMPRESSED = 2048
FILE_ATTRIBUTE_DIRECTORY = 16
FILE_ATTRIBUTE_HIDDEN = 2
FILE_ATTRIBUTE_NORMAL = 128
FILE_ATTRIBUTE_OFFLINE = 4096
FILE_ATTRIBUTE_READONLY = 1
FILE_ATTRIBUTE_SYSTEM = 4
FILE_ATTRIBUTE_TEMPORARY = 4
FILE_CREATE = 2
FILE_OPEN = 1
FILE_OPEN_IF = 3
FILE_OVERWRITE = 4
FILE_SUPERSEDE = 0
FILE_OVERWRITE_IF = 5
FILE_GENERIC_READ = 1179785
FILE_GENERIC_WRITE = 1179926
FILE_DELETE_ON_CLOSE = 0x00001000
REQ_GENERIC_READ = 0x80 | 0x08 | 0x01
REQ_GENERIC_WRITE = 0x004 | 0x0100 | 0x002 | 0x0010
STATUS_SUCCESS = 0x0
STATUS_ACCESS_DENIED = 0xC0000022
STATUS_LOCK_NOT_GRANTED = 0xC0000055
STATUS_NOT_SUPPORTED = 0xC00000BB
STATUS_OBJECT_NAME_COLLISION = 0xC0000035
STATUS_DIRECTORY_NOT_EMPTY = 0xC0000101
STATUS_NOT_LOCKED = 0xC000002A
STATUS_OBJECT_NAME_NOT_FOUND = 0xC0000034
STATUS_NOT_IMPLEMENTED = 0xC0000002
STATUS_OBJECT_PATH_NOT_FOUND = 0xC000003A
STATUS_BUFFER_OVERFLOW = 0x80000005
ERROR_ALREADY_EXISTS = 183
FILE_CASE_SENSITIVE_SEARCH = 0x00000001
FILE_CASE_PRESERVED_NAMES = 0x00000002
FILE_SUPPORTS_REMOTE_STORAGE = 0x00000100
FILE_UNICODE_ON_DISK = 0x00000004
FILE_PERSISTENT_ACLS = 0x00000008
# Some useful per-process global information
NATIVE_ENCODING = sys.getfilesystemencoding()
DATETIME_ZERO = datetime.datetime(1, 1, 1, 0, 0, 0)
DATETIME_STARTUP = datetime.datetime.utcnow()
FILETIME_UNIX_EPOCH = 116444736000000000
def handle_fs_errors(func):
"""Method decorator to report FS errors in the appropriate way.
This decorator catches all FS errors and translates them into an
equivalent OSError, then returns the negated error number. It also
makes the function return zero instead of None as an indication of
successful execution.
"""
func = convert_fs_errors(func)
@wraps(func)
def wrapper(*args, **kwds):
try:
res = func(*args, **kwds)
except OSError as e:
if e.errno:
res = _errno2syserrcode(e.errno)
else:
res = STATUS_ACCESS_DENIED;
except Exception as e:
raise
else:
if res is None:
res = 0
return res
return wrapper
# During long-running operations, Dokan requires that the DokanResetTimeout
# function be called periodically to indicate the progress is still being
# made. Unfortunately we don't have any facility for the underlying FS
# to make these calls for us, so we have to hack around it.
#
# The idea is to use a single background thread to monitor all active Dokan
# method calls, resetting the timeout until they have completed. Note that
# this completely undermines the point of DokanResetTimeout as it's now
# possible for a deadlock to hang the entire filesystem.
_TIMEOUT_PROTECT_THREAD = None
_TIMEOUT_PROTECT_LOCK = threading.Lock()
_TIMEOUT_PROTECT_COND = threading.Condition(_TIMEOUT_PROTECT_LOCK)
_TIMEOUT_PROTECT_QUEUE = deque()
_TIMEOUT_PROTECT_WAIT_TIME = 4 * 60
_TIMEOUT_PROTECT_RESET_TIME = 5 * 60 * 1000
def _start_timeout_protect_thread():
"""Start the background thread used to protect dokan from timeouts.
This function starts the background thread that monitors calls into the
dokan API and resets their timeouts. It's safe to call this more than
once, only a single thread will be started.
"""
global _TIMEOUT_PROTECT_THREAD
with _TIMEOUT_PROTECT_LOCK:
if _TIMEOUT_PROTECT_THREAD is None:
target = _run_timeout_protect_thread
_TIMEOUT_PROTECT_THREAD = threading.Thread(target=target)
_TIMEOUT_PROTECT_THREAD.daemon = True
_TIMEOUT_PROTECT_THREAD.start()
def _run_timeout_protect_thread():
while True:
with _TIMEOUT_PROTECT_COND:
try:
(when, info, finished) = _TIMEOUT_PROTECT_QUEUE.popleft()
except IndexError:
_TIMEOUT_PROTECT_COND.wait()
continue
if finished:
continue
now = time.time()
wait_time = max(0, _TIMEOUT_PROTECT_WAIT_TIME - now + when)
time.sleep(wait_time)
with _TIMEOUT_PROTECT_LOCK:
if finished:
continue
libdokan.DokanResetTimeout(_TIMEOUT_PROTECT_RESET_TIME, info)
_TIMEOUT_PROTECT_QUEUE.append((now + wait_time, info, finished))
def timeout_protect(func):
"""Method decorator to enable timeout protection during call.
This decorator adds an entry to the timeout protect queue before executing
the function, and marks it as finished when the function exits.
"""
@wraps(func)
def wrapper(self, *args):
if _TIMEOUT_PROTECT_THREAD is None:
_start_timeout_protect_thread()
info = args[-1]
finished = []
try:
with _TIMEOUT_PROTECT_COND:
_TIMEOUT_PROTECT_QUEUE.append((time.time(), info, finished))
_TIMEOUT_PROTECT_COND.notify()
return func(self, *args)
finally:
with _TIMEOUT_PROTECT_LOCK:
finished.append(True)
return wrapper
MIN_FH = 100
class FSOperations(object):
"""Object delegating all DOKAN_OPERATIONS pointers to an FS object."""
def __init__(self, fs, fsname="NTFS", volname="Dokan Volume", securityfolder=os.path.expanduser('~')):
if libdokan is None:
msg = 'dokan library (http://dokan-dev.github.io/) is not available'
raise OSError(msg)
self.fs = fs
self.fsname = fsname
self.volname = volname
self.securityfolder = securityfolder
self._files_by_handle = {}
self._files_lock = threading.Lock()
self._next_handle = MIN_FH
# Windows requires us to implement a kind of "lazy deletion", where
# a handle is marked for deletion but this is not actually done
# until the handle is closed. This set monitors pending deletes.
self._pending_delete = set()
# Since pyfilesystem has no locking API, we manage file locks
# in memory. This maps paths to a list of current locks.
self._active_locks = PathMap()
# Dokan expects a successful write() to be reflected in the file's
# reported size, but the FS might buffer writes and prevent this.
# We explicitly keep track of the size Dokan expects a file to be.
# This dict is indexed by path, then file handle.
self._files_size_written = PathMap()
def get_ops_struct(self):
"""Get a DOKAN_OPERATIONS struct mapping to our methods."""
struct = libdokan.DOKAN_OPERATIONS()
for (nm, typ) in libdokan.DOKAN_OPERATIONS._fields_:
setattr(struct, nm, typ(getattr(self, nm)))
return struct
def _get_file(self, fh):
"""Get the information associated with the given file handle."""
try:
return self._files_by_handle[fh]
except KeyError:
raise FSError("invalid file handle")
def _reg_file(self, f, path):
"""Register a new file handle for the given file and path."""
self._files_lock.acquire()
try:
fh = self._next_handle
self._next_handle += 1
lock = threading.Lock()
self._files_by_handle[fh] = (f, path, lock)
if path not in self._files_size_written:
self._files_size_written[path] = {}
self._files_size_written[path][fh] = 0
return fh
finally:
self._files_lock.release()
def _rereg_file(self, fh, f):
"""Re-register the file handle for the given file.
This might be necessary if we are required to write to a file
after its handle was closed (e.g. to complete an async write).
"""
self._files_lock.acquire()
try:
(f2, path, lock) = self._files_by_handle[fh]
assert f2.closed
self._files_by_handle[fh] = (f, path, lock)
return fh
finally:
self._files_lock.release()
def _del_file(self, fh):
"""Unregister the given file handle."""
self._files_lock.acquire()
try:
(f, path, lock) = self._files_by_handle.pop(fh)
del self._files_size_written[path][fh]
if not self._files_size_written[path]:
del self._files_size_written[path]
finally:
self._files_lock.release()
def _is_pending_delete(self, path):
"""Check if the given path is pending deletion.
This is true if the path or any of its parents have been marked
as pending deletion, false otherwise.
"""
for ppath in recursepath(path):
if ppath in self._pending_delete:
return True
return False
def _check_lock(self, path, offset, length, info, locks=None):
"""Check whether the given file range is locked.
This method implements basic lock checking. It checks all the locks
held against the given file, and if any overlap the given byte range
then it returns STATUS_LOCK_NOT_GRANTED. If the range is not locked, it will
return zero.
"""
if locks is None:
with self._files_lock:
try:
locks = self._active_locks[path]
except KeyError:
return STATUS_SUCCESS
for (lh, lstart, lend) in locks:
if info is not None and info.contents.Context == lh:
continue
if lstart >= offset + length:
continue
if lend <= offset:
continue
return STATUS_LOCK_NOT_GRANTED
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def ZwCreateFile(self, path, securitycontext, access, attribute, sharing, disposition, options, info):
path = self._dokanpath2pyfs(path)
# Can't open files that are pending delete.
if self._is_pending_delete(path):
return STATUS_ACCESS_DENIED
retcode = STATUS_SUCCESS
if self.fs.isdir(path) or info.contents.IsDirectory:
info.contents.IsDirectory = True
exist = self.fs.exists(path)
if disposition == FILE_CREATE:
if self.fs.exists(path):
retcode = STATUS_OBJECT_NAME_COLLISION
self.fs.makedir(path)
elif disposition == FILE_OPEN_IF:
if not self.fs.exists(path):
retcode = STATUS_OBJECT_PATH_NOT_FOUND
else:
# If no access rights are requested, only basic metadata is queried.
if not access:
if self.fs.isdir(path):
info.contents.IsDirectory = True
elif not self.fs.exists(path):
return STATUS_OBJECT_NAME_NOT_FOUND
return STATUS_SUCCESS
# This is where we'd convert the access mask into an appropriate
# mode string. Unfortunately, I can't seem to work out all the
# details. I swear MS Word is trying to write to files that it
# opens without asking for write permission.
# For now, just set the mode based on disposition flag.
if disposition == FILE_OVERWRITE_IF or disposition == FILE_SUPERSEDE:
if self.fs.exists(path):
retcode = STATUS_OBJECT_NAME_COLLISION
mode = "w+b"
elif disposition == FILE_OPEN_IF:
if not self.fs.exists(path):
mode = "w+b"
else:
mode = "r+b"
elif disposition == FILE_OPEN:
if not self.fs.exists(path):
return STATUS_OBJECT_NAME_NOT_FOUND
mode = "r+b"
elif disposition == FILE_OVERWRITE:
if not self.fs.exists(path):
return STATUS_OBJECT_NAME_NOT_FOUND
mode = "w+b"
elif disposition == FILE_CREATE:
if self.fs.exists(path):
return STATUS_OBJECT_NAME_COLLISION
mode = "w+b"
else:
mode = "r+b"
# Try to open the requested file. It may actually be a directory.
info.contents.Context = 1
try:
f = self.fs.open(path, mode)
# print(path, mode, repr(f))
except ResourceInvalidError:
info.contents.IsDirectory = True
except FSError as e:
# Sadly, win32 OSFS will raise all kinds of strange errors
# if you try to open() a directory. Need to check by hand.
if self.fs.isdir(path):
info.contents.IsDirectory = True
else:
# print(e)
raise
else:
info.contents.Context = self._reg_file(f, path)
if retcode == STATUS_SUCCESS and (options & FILE_DELETE_ON_CLOSE):
self._pending_delete.add(path)
return retcode
@timeout_protect
@handle_fs_errors
def Cleanup(self, path, info):
path = self._dokanpath2pyfs(path)
if info.contents.IsDirectory:
if info.contents.DeleteOnClose:
self.fs.removedir(path)
self._pending_delete.remove(path)
else:
(file, _, lock) = self._get_file(info.contents.Context)
lock.acquire()
try:
file.close()
if info.contents.DeleteOnClose:
self.fs.remove(path)
self._pending_delete.remove(path)
self._del_file(info.contents.Context)
info.contents.Context = 0
finally:
lock.release()
@timeout_protect
@handle_fs_errors
def CloseFile(self, path, info):
if info.contents.Context >= MIN_FH:
(file, _, lock) = self._get_file(info.contents.Context)
lock.acquire()
try:
file.close()
self._del_file(info.contents.Context)
finally:
lock.release()
info.contents.Context = 0
@timeout_protect
@handle_fs_errors
def ReadFile(self, path, buffer, nBytesToRead, nBytesRead, offset, info):
path = self._dokanpath2pyfs(path)
(file, _, lock) = self._get_file(info.contents.Context)
lock.acquire()
try:
status = self._check_lock(path, offset, nBytesToRead, info)
if status:
return status
# This may be called after Cleanup, meaning we
# need to re-open the file.
if file.closed:
file = self.fs.open(path, file.mode)
self._rereg_file(info.contents.Context, file)
file.seek(offset)
data = file.read(nBytesToRead)
ctypes.memmove(buffer, ctypes.create_string_buffer(data), len(data))
nBytesRead[0] = len(data)
finally:
lock.release()
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def WriteFile(self, path, buffer, nBytesToWrite, nBytesWritten, offset, info):
path = self._dokanpath2pyfs(path)
fh = info.contents.Context
(file, _, lock) = self._get_file(fh)
lock.acquire()
try:
status = self._check_lock(path, offset, nBytesToWrite, info)
if status:
return status
# This may be called after Cleanup, meaning we
# need to re-open the file.
if file.closed:
file = self.fs.open(path, file.mode)
self._rereg_file(info.contents.Context, file)
if info.contents.WriteToEndOfFile:
file.seek(0, os.SEEK_END)
else:
file.seek(offset)
data = ctypes.create_string_buffer(nBytesToWrite)
ctypes.memmove(data, buffer, nBytesToWrite)
file.write(data.raw)
nBytesWritten[0] = len(data.raw)
try:
size_written = self._files_size_written[path][fh]
except KeyError:
pass
else:
if offset + nBytesWritten[0] > size_written:
new_size_written = offset + nBytesWritten[0]
self._files_size_written[path][fh] = new_size_written
finally:
lock.release()
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def FlushFileBuffers(self, path, info):
path = self._dokanpath2pyfs(path)
(file, _, lock) = self._get_file(info.contents.Context)
lock.acquire()
try:
file.flush()
finally:
lock.release()
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def GetFileInformation(self, path, buffer, info):
path = self._dokanpath2pyfs(path)
finfo = self.fs.getinfo(path)
data = buffer.contents
self._info2finddataw(path, finfo, data, info)
try:
written_size = max(self._files_size_written[path].values())
except KeyError:
pass
else:
reported_size = (data.nFileSizeHigh << 32) + data.nFileSizeLow
if written_size > reported_size:
data.nFileSizeHigh = written_size >> 32
data.nFileSizeLow = written_size & 0xffffffff
data.nNumberOfLinks = 1
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def FindFiles(self, path, fillFindData, info):
path = self._dokanpath2pyfs(path)
for (nm, finfo) in self.fs.listdirinfo(path):
fpath = pathjoin(path, nm)
if self._is_pending_delete(fpath):
continue
data = self._info2finddataw(fpath, finfo)
fillFindData(ctypes.byref(data), info)
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def FindFilesWithPattern(self, path, pattern, fillFindData, info):
path = self._dokanpath2pyfs(path)
for (nm, finfo) in self.fs.listdirinfo(path):
fpath = pathjoin(path, nm)
if self._is_pending_delete(fpath):
continue
if not libdokan.DokanIsNameInExpression(pattern, nm, True):
continue
data = self._info2finddataw(fpath, finfo, None)
fillFindData(ctypes.byref(data), info)
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def SetFileAttributes(self, path, attrs, info):
path = self._dokanpath2pyfs(path)
# TODO: decode various file attributes
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def SetFileTime(self, path, ctime, atime, mtime, info):
path = self._dokanpath2pyfs(path)
# setting ctime is not supported
if atime is not None:
try:
atime = _filetime2datetime(atime.contents)
except ValueError:
atime = None
if mtime is not None:
try:
mtime = _filetime2datetime(mtime.contents)
except ValueError:
mtime = None
# some programs demand this succeed; fake it
try:
self.fs.settimes(path, atime, mtime)
except UnsupportedError:
pass
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def DeleteFile(self, path, info):
path = self._dokanpath2pyfs(path)
if not self.fs.isfile(path):
if not self.fs.exists(path):
return STATUS_ACCESS_DENIED
else:
return STATUS_OBJECT_NAME_NOT_FOUND
self._pending_delete.add(path)
# the actual delete takes place in self.CloseFile()
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def DeleteDirectory(self, path, info):
path = self._dokanpath2pyfs(path)
for nm in self.fs.listdir(path):
if not self._is_pending_delete(pathjoin(path, nm)):
return STATUS_DIRECTORY_NOT_EMPTY
self._pending_delete.add(path)
# the actual delete takes place in self.CloseFile()
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def MoveFile(self, src, dst, overwrite, info):
# Close the file if we have an open handle to it.
if info.contents.Context >= MIN_FH:
(file, _, lock) = self._get_file(info.contents.Context)
lock.acquire()
try:
file.close()
self._del_file(info.contents.Context)
finally:
lock.release()
src = self._dokanpath2pyfs(src)
dst = self._dokanpath2pyfs(dst)
if info.contents.IsDirectory:
self.fs.movedir(src, dst, overwrite=overwrite)
else:
self.fs.move(src, dst, overwrite=overwrite)
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def SetEndOfFile(self, path, length, info):
self._dokanpath2pyfs(path)
(file, _, lock) = self._get_file(info.contents.Context)
lock.acquire()
try:
pos = file.tell()
if length != pos:
file.seek(length)
file.truncate()
if pos < length:
file.seek(min(pos, length))
finally:
lock.release()
return STATUS_SUCCESS
@handle_fs_errors
def GetDiskFreeSpace(self, nBytesAvail, nBytesTotal, nBytesFree, info):
# This returns a stupidly large number if no info is available.
# It's better to pretend an operation is possible and have it fail
# than to pretend an operation will fail when it's actually possible.
large_amount = 100 * 1024 * 1024 * 1024
nBytesFree[0] = self.fs.getmeta("free_space", large_amount)
nBytesTotal[0] = self.fs.getmeta("total_space", 2 * large_amount)
nBytesAvail[0] = nBytesFree[0]
return STATUS_SUCCESS
@handle_fs_errors
def GetVolumeInformation(self, vnmBuf, vnmSz, sNum, maxLen, flags, fnmBuf, fnmSz, info):
nm = ctypes.create_unicode_buffer(self.volname[:vnmSz - 1])
sz = (len(nm.value) + 1) * ctypes.sizeof(ctypes.c_wchar)
ctypes.memmove(vnmBuf, nm, sz)
if sNum:
sNum[0] = 0
if maxLen:
maxLen[0] = 255
if flags:
flags[0] = FILE_CASE_SENSITIVE_SEARCH | FILE_CASE_PRESERVED_NAMES | FILE_SUPPORTS_REMOTE_STORAGE | FILE_UNICODE_ON_DISK | FILE_PERSISTENT_ACLS;
nm = ctypes.create_unicode_buffer(self.fsname[:fnmSz - 1])
sz = (len(nm.value) + 1) * ctypes.sizeof(ctypes.c_wchar)
ctypes.memmove(fnmBuf, nm, sz)
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def SetAllocationSize(self, path, length, info):
# I think this is supposed to reserve space for the file
# but *not* actually move the end-of-file marker.
# No way to do that in pyfs.
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def LockFile(self, path, offset, length, info):
end = offset + length
with self._files_lock:
try:
locks = self._active_locks[path]
except KeyError:
locks = self._active_locks[path] = []
else:
status = self._check_lock(path, offset, length, None, locks)
if status:
return status
locks.append((info.contents.Context, offset, end))
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def UnlockFile(self, path, offset, length, info):
with self._files_lock:
try:
locks = self._active_locks[path]
except KeyError:
return STATUS_NOT_LOCKED
todel = []
for i, (lh, lstart, lend) in enumerate(locks):
if info.contents.Context == lh:
if lstart == offset:
if lend == offset + length:
todel.append(i)
if not todel:
return STATUS_NOT_LOCKED
for i in reversed(todel):
del locks[i]
return STATUS_SUCCESS
@handle_fs_errors
def GetFileSecurity(self, path, securityinformation, securitydescriptor, securitydescriptorlength, neededlength, info):
securitydescriptor = ctypes.cast(securitydescriptor, libdokan.PSECURITY_DESCRIPTOR)
path = self._dokanpath2pyfs(path)
if self.fs.isdir(path):
res = libdokan.GetFileSecurity(
self.securityfolder,
ctypes.cast(securityinformation, libdokan.PSECURITY_INFORMATION)[0],
securitydescriptor,
securitydescriptorlength,
neededlength,
)
return STATUS_SUCCESS if res else STATUS_BUFFER_OVERFLOW
return STATUS_NOT_IMPLEMENTED
@handle_fs_errors
def SetFileSecurity(self, path, securityinformation, securitydescriptor, securitydescriptorlength, info):
return STATUS_NOT_IMPLEMENTED
@handle_fs_errors
def Mounted(self, info):
return STATUS_SUCCESS
@handle_fs_errors
def Unmounted(self, info):
return STATUS_SUCCESS
@handle_fs_errors
def FindStreams(self, path, callback, info):
return STATUS_NOT_IMPLEMENTED
def _dokanpath2pyfs(self, path):
path = path.replace('\\', '/')
return normpath(path)
def _info2attrmask(self, path, info, hinfo=None):
"""Convert a file/directory info dict to a win32 file attribute mask."""
attrs = 0
st_mode = info.get("st_mode", None)
if st_mode:
if statinfo.S_ISDIR(st_mode):
attrs |= FILE_ATTRIBUTE_DIRECTORY
elif statinfo.S_ISREG(st_mode):
attrs |= FILE_ATTRIBUTE_NORMAL
if not attrs and hinfo:
if hinfo.contents.IsDirectory:
attrs |= FILE_ATTRIBUTE_DIRECTORY
else:
attrs |= FILE_ATTRIBUTE_NORMAL
if not attrs:
if self.fs.isdir(path):
attrs |= FILE_ATTRIBUTE_DIRECTORY
else:
attrs |= FILE_ATTRIBUTE_NORMAL
return attrs
def _info2finddataw(self, path, info, data=None, hinfo=None):
"""Convert a file/directory info dict into a WIN32_FIND_DATAW struct."""
if data is None:
data = libdokan.WIN32_FIND_DATAW()
data.dwFileAttributes = self._info2attrmask(path, info, hinfo)
data.ftCreationTime = _datetime2filetime(info.get("created_time", None))
data.ftLastAccessTime = _datetime2filetime(info.get("accessed_time", None))
data.ftLastWriteTime = _datetime2filetime(info.get("modified_time", None))
data.nFileSizeHigh = info.get("size", 0) >> 32
data.nFileSizeLow = info.get("size", 0) & 0xffffffff
data.cFileName = basename(path)
data.cAlternateFileName = ""
return data
def _datetime2timestamp(dtime):
"""Convert a datetime object to a unix timestamp."""
t = time.mktime(dtime.timetuple())
t += dtime.microsecond / 1000000.0
return t
def _timestamp2datetime(tstamp):
"""Convert a unix timestamp to a datetime object."""
return datetime.datetime.fromtimestamp(tstamp)
def _timestamp2filetime(tstamp):
f = FILETIME_UNIX_EPOCH + int(tstamp * 10000000)
return libdokan.FILETIME(f & 0xffffffff, f >> 32)
def _filetime2timestamp(ftime):
f = ftime.dwLowDateTime | (ftime.dwHighDateTime << 32)
return (f - FILETIME_UNIX_EPOCH) / 10000000.0
def _filetime2datetime(ftime):
"""Convert a FILETIME struct info datetime.datetime object."""
if ftime is None:
return DATETIME_ZERO
if ftime.dwLowDateTime == 0 and ftime.dwHighDateTime == 0:
return DATETIME_ZERO
return _timestamp2datetime(_filetime2timestamp(ftime))
def _datetime2filetime(dtime):
"""Convert a FILETIME struct info datetime.datetime object."""
if dtime is None:
return libdokan.FILETIME(0, 0)
if dtime == DATETIME_ZERO:
return libdokan.FILETIME(0, 0)
return _timestamp2filetime(_datetime2timestamp(dtime))
def _errno2syserrcode(eno):
"""Convert an errno into a win32 system error code."""
if eno == errno.EEXIST:
return STATUS_OBJECT_NAME_COLLISION
if eno == errno.ENOTEMPTY:
return STATUS_DIRECTORY_NOT_EMPTY
if eno == errno.ENOSYS:
return STATUS_NOT_SUPPORTED
if eno == errno.EACCES:
return STATUS_ACCESS_DENIED
return eno
def _check_path_string(path): # TODO Probably os.path has a better check for this...
"""Check path string."""
if not path or not path[0].isalpha() or not path[1:3] == ':\\':
raise ValueError("invalid path: %r" % (path,))
def mount(fs, path, foreground=False, ready_callback=None, unmount_callback=None, **kwds):
"""Mount the given FS at the given path, using Dokan.
By default, this function spawns a new background process to manage the
Dokan event loop. The return value in this case is an instance of the
'MountProcess' class, a subprocess.Popen subclass.
If the keyword argument 'foreground' is given, we instead run the Dokan
main loop in the current process. In this case the function will block
until the filesystem is unmounted, then return None.
If the keyword argument 'ready_callback' is provided, it will be called
when the filesystem has been mounted and is ready for use. Any additional
keyword arguments control the behavior of the final dokan mount point.
Some interesting options include:
* numthreads: number of threads to use for handling Dokan requests
* fsname: name to display in explorer etc
* flags: DOKAN_OPTIONS bitmask
* securityfolder: folder path used to duplicate security rights on all folders
* FSOperationsClass: custom FSOperations subclass to use
"""
if libdokan is None:
raise OSError("the dokan library is not available")
_check_path_string(path)
# This function captures the logic of checking whether the Dokan mount
# is up and running. Unfortunately I can't find a way to get this
# via a callback in the Dokan API. Instead we just check for the path
# in a loop, polling the mount proc to make sure it hasn't died.
def check_alive(mp):
if mp and mp.poll() is not None:
raise OSError("dokan mount process exited prematurely")
def check_ready(mp=None):
if ready_callback is not False:
check_alive(mp)
for _ in range(100):
try:
os.stat(path)
except EnvironmentError:
check_alive(mp)
time.sleep(0.05)
else:
check_alive(mp)
if ready_callback:
return ready_callback()
else:
return None
else:
check_alive(mp)
raise OSError("dokan mount process seems to be hung")
# Running in the foreground is the final endpoint for the mount
# operation, it's where we call DokanMain().
if foreground:
numthreads = kwds.pop("numthreads", 0)
flags = kwds.pop("flags", 0)
FSOperationsClass = kwds.pop("FSOperationsClass", FSOperations)
opts = libdokan.DOKAN_OPTIONS(libdokan.DOKAN_MINIMUM_COMPATIBLE_VERSION, numthreads, flags, 0, path, "", 2000, 512, 512)
ops = FSOperationsClass(fs, **kwds)
if ready_callback:
check_thread = threading.Thread(target=check_ready)
check_thread.daemon = True
check_thread.start()
opstruct = ops.get_ops_struct()
res = libdokan.DokanMain(ctypes.byref(opts), ctypes.byref(opstruct))
if res != DOKAN_SUCCESS:
raise OSError("Dokan failed with error: %d" % (res,))
if unmount_callback:
unmount_callback()
# Running in the background, spawn a subprocess and wait for it
# to be ready before returning.
else:
mp = MountProcess(fs, path, kwds)
check_ready(mp)
if unmount_callback:
orig_unmount = mp.unmount
def new_unmount():
orig_unmount()
unmount_callback()
mp.unmount = new_unmount
return mp
def unmount(path):
"""Unmount the given path.
This function unmounts the dokan path mounted at the given path.
It works but may leave dangling processes; it's better to use the "unmount"
method on the MountProcess class if you have one.
"""
_check_path_string(path)
if not libdokan.DokanRemoveMountPoint(path):
raise OSError("filesystem could not be unmounted: %s" % (path,))
class MountProcess(subprocess.Popen):
"""subprocess.Popen subclass managing a Dokan mount.
This is a subclass of subprocess.Popen, designed for easy management of
a Dokan mount in a background process. Rather than specifying the command
to execute, pass in the FS object to be mounted, the target path
and a dictionary of options for the Dokan process.
In order to be passed successfully to the new process, the FS object
must be pickleable. Since win32 has no fork() this restriction is not
likely to be lifted (see also the "multiprocessing" module)
This class has an extra attribute 'path' giving the path of the mounted
filesystem, and an extra method 'unmount' that will cleanly unmount it
and terminate the process.
"""
# This works by spawning a new python interpreter and passing it the
# pickled (fs,path,opts) tuple on the command-line. Something like this:
#
# python -c "import MountProcess; MountProcess._do_mount('..data..')
#
unmount_timeout = 5
def __init__(self, fs, path, dokan_opts={}, nowait=False, **kwds):
if libdokan is None:
raise OSError("the dokan library is not available")
_check_path_string(path)
self.path = path
cmd = "try: import cPickle as pickle;\n"
cmd = cmd + "except ImportError: import pickle;\n"
cmd = cmd + "data = pickle.loads(%s); "
cmd = cmd + "from fs.expose.dokan import MountProcess; "
cmd = cmd + "MountProcess._do_mount(data)"
cmd = cmd % (repr(pickle.dumps((fs, path, dokan_opts, nowait), -1)),)
cmd = [sys.executable, "-c", cmd]
super(MountProcess, self).__init__(cmd, **kwds)
def unmount(self):
"""Cleanly unmount the Dokan filesystem, terminating this subprocess."""
if not libdokan.DokanRemoveMountPoint(self.path):
raise OSError("the filesystem could not be unmounted: %s" %(self.path,))
self.terminate()
if not hasattr(subprocess.Popen, "terminate"):
def terminate(self):
"""Gracefully terminate the subprocess."""
kernel32.TerminateProcess(int(self._handle), -1)
if not hasattr(subprocess.Popen, "kill"):
def kill(self):
"""Forcibly terminate the subprocess."""
kernel32.TerminateProcess(int(self._handle), -1)
@staticmethod
def _do_mount(data):
"""Perform the specified mount."""
(fs, path, opts, nowait) = data
opts["foreground"] = True
def unmount_callback():
fs.close()
opts["unmount_callback"] = unmount_callback
if nowait:
opts["ready_callback"] = False
mount(fs, path, **opts)
class Win32SafetyFS(WrapFS):
"""FS wrapper for extra safety when mounting on win32.
This wrapper class provides some safety features when mounting untrusted
filesystems on win32. Specifically:
* hiding autorun files
* removing colons from paths
"""
def __init__(self, wrapped_fs, allow_autorun=False):
self.allow_autorun = allow_autorun
super(Win32SafetyFS, self).__init__(wrapped_fs)
def _encode(self, path):
path = relpath(normpath(path))
path = path.replace(":", "__colon__")
if not self.allow_autorun:
if path.lower().startswith("_autorun."):
path = path[1:]
return path
def _decode(self, path):
path = relpath(normpath(path))
path = path.replace("__colon__", ":")
if not self.allow_autorun:
if path.lower().startswith("autorun."):
path = "_" + path
return path
if __name__ == "__main__":
import os.path
import tempfile
from fs.osfs import OSFS
from fs.memoryfs import MemoryFS
from shutil import rmtree
from six import b
path = tempfile.mkdtemp()
try:
fs = OSFS(path)
#fs = MemoryFS()
fs.setcontents("test1.txt", b("test one"))
flags = DOKAN_OPTION_DEBUG | DOKAN_OPTION_STDERR | DOKAN_OPTION_REMOVABLE
mount(fs, "Q:\\", foreground=True, numthreads=1, flags=flags)
fs.close()
finally:
rmtree(path)
|
py | 1a52074689a5b14a0549fb66f4002de8437630e8 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Locale dependent formatting and parsing of numeric data.
The default locale for the functions in this module is determined by the
following environment variables, in that order:
* ``LC_NUMERIC``,
* ``LC_ALL``, and
* ``LANG``
"""
# TODO:
# Padding and rounding increments in pattern:
# - http://www.unicode.org/reports/tr35/ (Appendix G.6)
import math
import re
try:
from decimal import Decimal
have_decimal = True
except ImportError:
have_decimal = False
from babel.core import default_locale, Locale
__all__ = ['format_number', 'format_decimal', 'format_currency',
'format_percent', 'format_scientific', 'parse_number',
'parse_decimal', 'NumberFormatError']
__docformat__ = 'restructuredtext en'
LC_NUMERIC = default_locale('LC_NUMERIC')
def get_currency_name(currency, locale=LC_NUMERIC):
"""Return the name used by the locale for the specified currency.
>>> get_currency_name('USD', 'en_US')
u'US Dollar'
:param currency: the currency code
:param locale: the `Locale` object or locale identifier
:return: the currency symbol
:rtype: `unicode`
:since: version 0.9.4
"""
return Locale.parse(locale).currencies.get(currency, currency)
def get_currency_symbol(currency, locale=LC_NUMERIC):
"""Return the symbol used by the locale for the specified currency.
>>> get_currency_symbol('USD', 'en_US')
u'$'
:param currency: the currency code
:param locale: the `Locale` object or locale identifier
:return: the currency symbol
:rtype: `unicode`
"""
return Locale.parse(locale).currency_symbols.get(currency, currency)
def get_decimal_symbol(locale=LC_NUMERIC):
"""Return the symbol used by the locale to separate decimal fractions.
>>> get_decimal_symbol('en_US')
u'.'
:param locale: the `Locale` object or locale identifier
:return: the decimal symbol
:rtype: `unicode`
"""
return Locale.parse(locale).number_symbols.get('decimal', u'.')
def get_plus_sign_symbol(locale=LC_NUMERIC):
"""Return the plus sign symbol used by the current locale.
>>> get_plus_sign_symbol('en_US')
u'+'
:param locale: the `Locale` object or locale identifier
:return: the plus sign symbol
:rtype: `unicode`
"""
return Locale.parse(locale).number_symbols.get('plusSign', u'+')
def get_minus_sign_symbol(locale=LC_NUMERIC):
"""Return the plus sign symbol used by the current locale.
>>> get_minus_sign_symbol('en_US')
u'-'
:param locale: the `Locale` object or locale identifier
:return: the minus sign symbol
:rtype: `unicode`
"""
return Locale.parse(locale).number_symbols.get('minusSign', u'-')
def get_exponential_symbol(locale=LC_NUMERIC):
"""Return the symbol used by the locale to separate mantissa and exponent.
>>> get_exponential_symbol('en_US')
u'E'
:param locale: the `Locale` object or locale identifier
:return: the exponential symbol
:rtype: `unicode`
"""
return Locale.parse(locale).number_symbols.get('exponential', u'E')
def get_group_symbol(locale=LC_NUMERIC):
"""Return the symbol used by the locale to separate groups of thousands.
>>> get_group_symbol('en_US')
u','
:param locale: the `Locale` object or locale identifier
:return: the group symbol
:rtype: `unicode`
"""
return Locale.parse(locale).number_symbols.get('group', u',')
def format_number(number, locale=LC_NUMERIC):
"""Return the given number formatted for a specific locale.
>>> format_number(1099, locale='en_US')
u'1,099'
:param number: the number to format
:param locale: the `Locale` object or locale identifier
:return: the formatted number
:rtype: `unicode`
"""
# Do we really need this one?
return format_decimal(number, locale=locale)
def format_decimal(number, format=None, locale=LC_NUMERIC):
"""Return the given decimal number formatted for a specific locale.
>>> format_decimal(1.2345, locale='en_US')
u'1.234'
>>> format_decimal(1.2346, locale='en_US')
u'1.235'
>>> format_decimal(-1.2346, locale='en_US')
u'-1.235'
>>> format_decimal(1.2345, locale='sv_SE')
u'1,234'
>>> format_decimal(12345, locale='de')
u'12.345'
The appropriate thousands grouping and the decimal separator are used for
each locale:
>>> format_decimal(12345.5, locale='en_US')
u'12,345.5'
:param number: the number to format
:param format:
:param locale: the `Locale` object or locale identifier
:return: the formatted decimal number
:rtype: `unicode`
"""
locale = Locale.parse(locale)
if not format:
format = locale.decimal_formats.get(format)
pattern = parse_pattern(format)
return pattern.apply(number, locale)
def format_currency(number, currency, format=None, locale=LC_NUMERIC):
u"""Return formatted currency value.
>>> format_currency(1099.98, 'USD', locale='en_US')
u'$1,099.98'
>>> format_currency(1099.98, 'USD', locale='es_CO')
u'US$\\xa01.099,98'
>>> format_currency(1099.98, 'EUR', locale='de_DE')
u'1.099,98\\xa0\\u20ac'
The pattern can also be specified explicitly:
>>> format_currency(1099.98, 'EUR', u'\xa4\xa4 #,##0.00', locale='en_US')
u'EUR 1,099.98'
:param number: the number to format
:param currency: the currency code
:param locale: the `Locale` object or locale identifier
:return: the formatted currency value
:rtype: `unicode`
"""
locale = Locale.parse(locale)
if not format:
format = locale.currency_formats.get(format)
pattern = parse_pattern(format)
return pattern.apply(number, locale, currency=currency)
def format_percent(number, format=None, locale=LC_NUMERIC):
"""Return formatted percent value for a specific locale.
>>> format_percent(0.34, locale='en_US')
u'34%'
>>> format_percent(25.1234, locale='en_US')
u'2,512%'
>>> format_percent(25.1234, locale='sv_SE')
u'2\\xa0512\\xa0%'
The format pattern can also be specified explicitly:
>>> format_percent(25.1234, u'#,##0\u2030', locale='en_US')
u'25,123\u2030'
:param number: the percent number to format
:param format:
:param locale: the `Locale` object or locale identifier
:return: the formatted percent number
:rtype: `unicode`
"""
locale = Locale.parse(locale)
if not format:
format = locale.percent_formats.get(format)
pattern = parse_pattern(format)
return pattern.apply(number, locale)
def format_scientific(number, format=None, locale=LC_NUMERIC):
"""Return value formatted in scientific notation for a specific locale.
>>> format_scientific(10000, locale='en_US')
u'1E4'
The format pattern can also be specified explicitly:
>>> format_scientific(1234567, u'##0E00', locale='en_US')
u'1.23E06'
:param number: the number to format
:param format:
:param locale: the `Locale` object or locale identifier
:return: value formatted in scientific notation.
:rtype: `unicode`
"""
locale = Locale.parse(locale)
if not format:
format = locale.scientific_formats.get(format)
pattern = parse_pattern(format)
return pattern.apply(number, locale)
class NumberFormatError(ValueError):
"""Exception raised when a string cannot be parsed into a number."""
def parse_number(string, locale=LC_NUMERIC):
"""Parse localized number string into a long integer.
>>> parse_number('1,099', locale='en_US')
1099L
>>> parse_number('1.099', locale='de_DE')
1099L
When the given string cannot be parsed, an exception is raised:
>>> parse_number('1.099,98', locale='de')
Traceback (most recent call last):
...
NumberFormatError: '1.099,98' is not a valid number
:param string: the string to parse
:param locale: the `Locale` object or locale identifier
:return: the parsed number
:rtype: `long`
:raise `NumberFormatError`: if the string can not be converted to a number
"""
try:
return long(string.replace(get_group_symbol(locale), ''))
except ValueError:
raise NumberFormatError('%r is not a valid number' % string)
def parse_decimal(string, locale=LC_NUMERIC):
"""Parse localized decimal string into a float.
>>> parse_decimal('1,099.98', locale='en_US')
1099.98
>>> parse_decimal('1.099,98', locale='de')
1099.98
When the given string cannot be parsed, an exception is raised:
>>> parse_decimal('2,109,998', locale='de')
Traceback (most recent call last):
...
NumberFormatError: '2,109,998' is not a valid decimal number
:param string: the string to parse
:param locale: the `Locale` object or locale identifier
:return: the parsed decimal number
:rtype: `float`
:raise `NumberFormatError`: if the string can not be converted to a
decimal number
"""
locale = Locale.parse(locale)
try:
return float(string.replace(get_group_symbol(locale), '')
.replace(get_decimal_symbol(locale), '.'))
except ValueError:
raise NumberFormatError('%r is not a valid decimal number' % string)
PREFIX_END = r'[^0-9@#.,]'
NUMBER_TOKEN = r'[0-9@#.\-,E+]'
PREFIX_PATTERN = r"(?P<prefix>(?:'[^']*'|%s)*)" % PREFIX_END
NUMBER_PATTERN = r"(?P<number>%s+)" % NUMBER_TOKEN
SUFFIX_PATTERN = r"(?P<suffix>.*)"
number_re = re.compile(r"%s%s%s" % (PREFIX_PATTERN, NUMBER_PATTERN,
SUFFIX_PATTERN))
def split_number(value):
"""Convert a number into a (intasstring, fractionasstring) tuple"""
if have_decimal and isinstance(value, Decimal):
text = str(value)
else:
text = ('%.9f' % value).rstrip('0')
if '.' in text:
a, b = text.split('.', 1)
if b == '0':
b = ''
else:
a, b = text, ''
return a, b
def bankersround(value, ndigits=0):
"""Round a number to a given precision.
Works like round() except that the round-half-even (banker's rounding)
algorithm is used instead of round-half-up.
>>> bankersround(5.5, 0)
6.0
>>> bankersround(6.5, 0)
6.0
>>> bankersround(-6.5, 0)
-6.0
>>> bankersround(1234.0, -2)
1200.0
"""
sign = int(value < 0) and -1 or 1
value = abs(value)
a, b = split_number(value)
digits = a + b
add = 0
i = len(a) + ndigits
if i < 0 or i >= len(digits):
pass
elif digits[i] > '5':
add = 1
elif digits[i] == '5' and digits[i-1] in '13579':
add = 1
scale = 10**ndigits
if have_decimal and isinstance(value, Decimal):
return Decimal(int(value * scale + add)) / scale * sign
else:
return float(int(value * scale + add)) / scale * sign
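# Illustrative sketch (not part of the original module): the same half-even
# behaviour can be cross-checked against the standard library's decimal
# module.  `_check_half_even` is a hypothetical helper used only for
# demonstration, e.g. _check_half_even(6.5) == 6.0 and
# _check_half_even(1234.0, -2) == 1200.0.
def _check_half_even(value, ndigits=0):
    from decimal import Decimal, ROUND_HALF_EVEN
    exponent = Decimal(1).scaleb(-ndigits)  # ndigits=2 -> Decimal('0.01')
    return float(Decimal(str(value)).quantize(exponent,
                                              rounding=ROUND_HALF_EVEN))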
def parse_pattern(pattern):
"""Parse number format patterns"""
if isinstance(pattern, NumberPattern):
return pattern
# Do we have a negative subpattern?
if ';' in pattern:
pattern, neg_pattern = pattern.split(';', 1)
pos_prefix, number, pos_suffix = number_re.search(pattern).groups()
neg_prefix, _, neg_suffix = number_re.search(neg_pattern).groups()
else:
pos_prefix, number, pos_suffix = number_re.search(pattern).groups()
neg_prefix = '-' + pos_prefix
neg_suffix = pos_suffix
if 'E' in number:
number, exp = number.split('E', 1)
else:
exp = None
if '@' in number:
if '.' in number and '0' in number:
raise ValueError('Significant digit patterns can not contain '
'"@" or "0"')
if '.' in number:
integer, fraction = number.rsplit('.', 1)
else:
integer = number
fraction = ''
min_frac = max_frac = 0
def parse_precision(p):
"""Calculate the min and max allowed digits"""
min = max = 0
for c in p:
if c in '@0':
min += 1
max += 1
elif c == '#':
max += 1
elif c == ',':
continue
else:
break
return min, max
def parse_grouping(p):
"""Parse primary and secondary digit grouping
>>> parse_grouping('##')
(1000, 1000)
>>> parse_grouping('#,###')
(3, 3)
>>> parse_grouping('#,####,###')
(3, 4)
"""
width = len(p)
g1 = p.rfind(',')
if g1 == -1:
return 1000, 1000
g1 = width - g1 - 1
g2 = p[:-g1 - 1].rfind(',')
if g2 == -1:
return g1, g1
g2 = width - g1 - g2 - 2
return g1, g2
int_prec = parse_precision(integer)
frac_prec = parse_precision(fraction)
if exp:
frac_prec = parse_precision(integer+fraction)
exp_plus = exp.startswith('+')
exp = exp.lstrip('+')
exp_prec = parse_precision(exp)
else:
exp_plus = None
exp_prec = None
grouping = parse_grouping(integer)
return NumberPattern(pattern, (pos_prefix, neg_prefix),
(pos_suffix, neg_suffix), grouping,
int_prec, frac_prec,
exp_prec, exp_plus)
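# Illustrative sketch (not part of the original module): parse_pattern splits
# a CLDR pattern into prefixes/suffixes, grouping sizes and precision ranges.
# The demo below only prints whatever the parser produced; it is never called
# on import and its name is hypothetical.
def _demo_parse_pattern(pattern=u'#,##0.00'):
    parsed = parse_pattern(pattern)
    print('prefixes: %r suffixes: %r' % (parsed.prefix, parsed.suffix))
    print('grouping: %r' % (parsed.grouping,))  # (primary, secondary) sizes
    print('precision: %r / %r' % (parsed.int_prec, parsed.frac_prec))
    return parsed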
class NumberPattern(object):
def __init__(self, pattern, prefix, suffix, grouping,
int_prec, frac_prec, exp_prec, exp_plus):
self.pattern = pattern
self.prefix = prefix
self.suffix = suffix
self.grouping = grouping
self.int_prec = int_prec
self.frac_prec = frac_prec
self.exp_prec = exp_prec
self.exp_plus = exp_plus
if '%' in ''.join(self.prefix + self.suffix):
self.scale = 100
elif u'‰' in ''.join(self.prefix + self.suffix):
self.scale = 1000
else:
self.scale = 1
def __repr__(self):
return '<%s %r>' % (type(self).__name__, self.pattern)
def apply(self, value, locale, currency=None):
value *= self.scale
is_negative = int(value < 0)
if self.exp_prec: # Scientific notation
value = abs(value)
if value:
exp = int(math.floor(math.log(value, 10)))
else:
exp = 0
# Minimum number of integer digits
if self.int_prec[0] == self.int_prec[1]:
exp -= self.int_prec[0] - 1
# Exponent grouping
elif self.int_prec[1]:
exp = int(exp) / self.int_prec[1] * self.int_prec[1]
if not have_decimal or not isinstance(value, Decimal):
value = float(value)
if exp < 0:
value = value * 10**(-exp)
else:
value = value / 10**exp
exp_sign = ''
if exp < 0:
exp_sign = get_minus_sign_symbol(locale)
elif self.exp_plus:
exp_sign = get_plus_sign_symbol(locale)
exp = abs(exp)
number = u'%s%s%s%s' % \
(self._format_sigdig(value, self.frac_prec[0],
self.frac_prec[1]),
get_exponential_symbol(locale), exp_sign,
self._format_int(str(exp), self.exp_prec[0],
self.exp_prec[1], locale))
elif '@' in self.pattern: # Is it a significant digits pattern?
text = self._format_sigdig(abs(value),
self.int_prec[0],
self.int_prec[1])
if '.' in text:
a, b = text.split('.')
a = self._format_int(a, 0, 1000, locale)
if b:
b = get_decimal_symbol(locale) + b
number = a + b
else:
number = self._format_int(text, 0, 1000, locale)
else: # A normal number pattern
a, b = split_number(bankersround(abs(value),
self.frac_prec[1]))
b = b or '0'
a = self._format_int(a, self.int_prec[0],
self.int_prec[1], locale)
b = self._format_frac(b, locale)
number = a + b
retval = u'%s%s%s' % (self.prefix[is_negative], number,
self.suffix[is_negative])
if u'¤' in retval:
retval = retval.replace(u'¤¤', currency.upper())
retval = retval.replace(u'¤', get_currency_symbol(currency, locale))
return retval
def _format_sigdig(self, value, min, max):
"""Convert value to a string.
The resulting string will contain between `min` and `max` significant
digits.
"""
a, b = split_number(value)
ndecimals = len(a)
if a == '0' and b != '':
ndecimals = 0
while b.startswith('0'):
b = b[1:]
ndecimals -= 1
a, b = split_number(bankersround(value, max - ndecimals))
digits = len((a + b).lstrip('0'))
if not digits:
digits = 1
# Figure out if we need to add any trailing '0':s
if len(a) >= max and a != '0':
return a
if digits < min:
b += ('0' * (min - digits))
if b:
return '%s.%s' % (a, b)
return a
def _format_int(self, value, min, max, locale):
width = len(value)
if width < min:
value = '0' * (min - width) + value
gsize = self.grouping[0]
ret = ''
symbol = get_group_symbol(locale)
while len(value) > gsize:
ret = symbol + value[-gsize:] + ret
value = value[:-gsize]
gsize = self.grouping[1]
return value + ret
def _format_frac(self, value, locale):
min, max = self.frac_prec
if len(value) < min:
value += ('0' * (min - len(value)))
if max == 0 or (min == 0 and int(value) == 0):
return ''
width = len(value)
while len(value) > min and value[-1] == '0':
value = value[:-1]
return get_decimal_symbol(locale) + value
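# Illustrative usage sketch (not part of the original module), exercising only
# behaviour already documented in the doctests above.  Guarded so that it
# never runs on import.
if __name__ == '__main__':
    print(format_decimal(12345.5, locale='en_US'))          # u'12,345.5'
    print(format_percent(0.34, locale='en_US'))             # u'34%'
    print(format_currency(1099.98, 'USD', locale='en_US'))  # u'$1,099.98'
    print(parse_decimal('1.099,98', locale='de'))           # 1099.98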
|
py | 1a52085a173730b999c15d63577bffa0469af7bc | from heapq import *
from typing import List, Union, Tuple
import numpy as np
from skimage.draw import line as skline
from seedpod_ground_risk.pathfinding.algorithm import Algorithm
from seedpod_ground_risk.pathfinding.environment import GridEnvironment, Node
from seedpod_ground_risk.pathfinding.heuristic import Heuristic, ManhattanHeuristic
def _reconstruct_path(end: Node, grid: np.ndarray, smooth=True) -> List[Node]:
reverse_path = []
reverse_path_append = reverse_path.append
reverse_path_append(end)
node = end
while node is not None:
reverse_path_append(node)
if node.parent is None:
break
if node == node.parent:
reverse_path_append(node.parent)
break
node = node.parent
path = list(reversed(reverse_path))
if not smooth:
return path
def get_path_sum(nx, ny, tx, ty, grid):
line = skline(nx, ny, tx, ty)
line_points = grid[line[0], line[1]]
# If the new line crosses any blocked areas the cost is inf
if -1 in line_points:
return np.inf
else:
return line_points.sum()
def jump_path(node: Node, path, grid, goal: Node):
ny, nx = node.position
gy, gx = goal.position
if get_path_sum(nx, ny, gx, gy, grid) == 0:
return goal
start_node_index = path.index(node)
next_node_index = start_node_index + 1
for test_node_index in reversed(range(len(path))):
# Ensure still looking forward from start node
if test_node_index > next_node_index:
ty, tx = path[test_node_index].position
path_x = [p.position[1] for p in path[start_node_index:test_node_index]]
path_y = [p.position[0] for p in path[start_node_index:test_node_index]]
existing_path_sum = grid[path_y, path_x].sum()
test_path_sum = get_path_sum(nx, ny, tx, ty, grid)
if test_path_sum <= existing_path_sum:
return path[test_node_index]
return path[next_node_index]
simplified_path = []
next_node = path[0]
simplified_path.append(next_node)
while next_node != end:
jump_node = jump_path(next_node, path, grid, end)
simplified_path.append(jump_node)
next_node = jump_node
return simplified_path
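# Illustrative sketch (not part of the original module): the smoothing above
# scores each candidate shortcut by rasterising the straight segment with
# skimage.draw.line and summing the risk values it crosses.  The helper below
# shows that idea in isolation on a tiny grid; the (row, column) argument
# order used here is an assumption of the demo, not a statement about the
# surrounding code.
def _demo_segment_cost():
    grid = np.zeros((5, 5))
    grid[2, 1:4] = 7.0           # a band of elevated risk across row 2
    rr, cc = skline(0, 0, 4, 4)  # cells touched by the diagonal segment
    return grid[rr, cc].sum()    # 7.0: the diagonal crosses one risky cell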
class GridAStar(Algorithm):
def __init__(self, heuristic: Heuristic = ManhattanHeuristic()):
self.heuristic = heuristic.h
def find_path(self, environment: GridEnvironment, start: Node, end: Node) -> Union[
List[Node], None]:
pass
# Canonical algorithm from literature
class RiskAStar(Algorithm):
def find_path(self, environment: GridEnvironment, start: Node, end: Node, k=0.9, smooth=True, **kwargs) -> Union[
List[Node], None]:
grid = environment.grid
min_dist = 2 ** 0.5
goal_val = grid[end.position]
# Use heapq; the thread safety provided by PriorityQueue is not needed, as we only execute on a single thread
open = [start]
start.f = start.g = start.h = 0
open_cost = {start: start.f}
closed = set()
while open:
node = heappop(open)
if node in open_cost:
open_cost.pop(node)
if node in closed:
continue
closed.add(node)
if node == end:
return _reconstruct_path(node, grid, smooth=smooth)
current_cost = node.f
node_val = grid[node.position]
for neighbour in environment.get_neighbours(node):
cost = current_cost \
+ (((grid[neighbour.position] + node_val) / 2)
* (((node.position[1] - neighbour.position[1]) ** 2 + (
node.position[0] - neighbour.position[0]) ** 2) ** 0.5))
if cost < neighbour.g:
neighbour.g = cost
dist = ((node.position[1] - end.position[1]) ** 2 + (
node.position[0] - end.position[0]) ** 2) ** 0.5
line = skline(node.position[1], node.position[0], end.position[1], end.position[0])
min_val = grid[line[0], line[1]].min()
node_val = grid[node.position]
h = k * ((((node_val + goal_val) / 2) * min_dist) + ((dist - min_dist) * min_val))
# h = self.heuristic(neighbour.position, end.position)
neighbour.h = h
neighbour.f = cost + h
neighbour.parent = node
if neighbour not in open_cost or neighbour.f < open_cost[neighbour]:
heappush(open, neighbour)
open_cost[neighbour] = neighbour.f
return None
class RiskGridAStar(GridAStar):
def find_path(self, environment: GridEnvironment, start: Node, end: Node, k=1, smooth=True, **kwargs) -> Union[
List[Node], None]:
grid = environment.grid
# Use heapq; the thread safety provided by PriorityQueue is not needed, as we only execute on a single thread
open = [start]
start.f = start.g = start.h = 0
open_cost = {start: start.f}
closed = set()
while open:
node = heappop(open)
if node in open_cost:
open_cost.pop(node)
if node in closed:
continue
closed.add(node)
if node == end:
return _reconstruct_path(node, grid, smooth=smooth)
current_cost = node.f
for neighbour in environment.get_neighbours(node):
cost = current_cost + grid[neighbour.position]
if cost < neighbour.g:
neighbour.g = cost
h = abs((node.position[0] - end.position[0])) + abs((node.position[1] - end.position[1]))
neighbour.h = h
neighbour.f = cost + (k * h)
neighbour.parent = node
if neighbour not in open_cost or neighbour.f < open_cost[neighbour]:
heappush(open, neighbour)
open_cost[neighbour] = neighbour.f
return None
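# Illustrative sketch (not part of the original module): both searches above
# combine a plain heapq list with an `open_cost` dict as a lazy alternative to
# decrease-key -- cheaper duplicates are simply pushed again and stale entries
# are skipped via the closed set.  The stripped-down generator below shows
# only that bookkeeping pattern; every name in it is hypothetical.
def _open_list_pattern(start_item):
    open_heap = [start_item]
    open_cost = {start_item: 0}
    closed = set()
    while open_heap:
        item = heappop(open_heap)
        open_cost.pop(item, None)
        if item in closed:
            continue              # a stale duplicate of an expanded item
        closed.add(item)
        yield item                # callers would push successors here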
class JumpPointSearchAStar(GridAStar):
def find_path(self, environment: GridEnvironment, start: Node, end: Node) -> Union[
List[Node], None]:
if not environment.diagonals:
raise ValueError('JPS relies on a grid environment with diagonals')
self.environment = environment
grid = environment.grid
self._max_y, self._max_x = self.environment.grid.shape[0] - 1, self.environment.grid.shape[1] - 1
self.goal = end
# Use heapq; the thread safety provided by PriorityQueue is not needed, as we only execute on a single thread
open = [start]
start.f = start.g = start.h = 0
open_cost = {start: start.f}
closed = set()
while open:
node = heappop(open)
open_cost.pop(node)
if node in closed:
continue
closed.add(node)
if node == end:
return _reconstruct_path(end, grid)
current_cost = node.f
cy, cx = node.position
successors = []
for neighbour in environment.get_neighbours(node):
dx, dy = neighbour.position[1] - cx, neighbour.position[0] - cy
jump_point = self._jump(cy, cx, dy, dx)
if jump_point:
successors.append(Node(jump_point))
for successor in successors:
cost = current_cost + grid[successor.position]
if cost < successor.g:
successor.g = cost
h = self.heuristic(successor.position, end.position)
successor.h = h
successor.f = h + cost
if successor not in open_cost or successor.f < open_cost[successor]:
heappush(open, successor)
open_cost[successor] = successor.f
return None
def _jump(self, cy: int, cx: int, dy: int, dx: int) -> Tuple[int, int]:
ny, nx = cy + dy, cx + dx
if not self._is_passable(ny, nx):
return None
if nx == self.goal.position[1] and ny == self.goal.position[0]:
return ny, nx
if dx and dy:
# Diagonal case
if (self._is_passable(ny + dy, nx - dx) and not self._is_passable(ny, nx - dx)) or \
(self._is_passable(ny - dy, nx + dx) and not self._is_passable(ny - dy, nx)):
return ny, nx
# Orthogonal searches
if self._jump(ny, nx, dy, 0) or self._jump(ny, nx, 0, dx):
return ny, nx
else:
# Orthogonal case
if dx:
if (self._is_passable(ny + 1, nx + dx) and not self._is_passable(ny + 1, nx)) or \
(self._is_passable(ny - 1, nx + dx) and not self._is_passable(ny - 1, nx)):
return ny, nx
else: # dy
if (self._is_passable(ny + dy, nx + 1) and not self._is_passable(ny, nx + 1)) or \
(self._is_passable(ny + dy, nx - 1) and not self._is_passable(ny, nx - 1)):
return ny, nx
return self._jump(ny, nx, dy, dx)
def _is_passable(self, y, x):
if y < 0 or y > self._max_y or x < 0 or x > self._max_x:
return False
return self.environment.grid[y, x] > -1
|
py | 1a5208916695fc756d19c5d471015209a515d614 | #!/usr/bin/env python
"""Simple parsers for Linux files."""
import collections
import os
import re
import logging
from grr.lib import config_lib
from grr.lib import parsers
from grr.lib import utils
from grr.lib.rdfvalues import anomaly as rdf_anomaly
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.parsers import config_file
class PCIDevicesInfoParser(parsers.FileParser):
"""Parser for PCI devices' info files located in /sys/bus/pci/devices/*/*."""
output_types = ["PCIDevice"]
supported_artifacts = ["PCIDevicesInfoFiles"]
process_together = True
def ParseMultiple(self, stats, file_objects, unused_knowledge_base):
# Each file gives us only partial information for a particular PCI device.
# Iterate through all the files first to create a dictionary encapsulating
# complete information for each of the PCI devices on the system. We need
# all information for a PCI device before a proto for it can be created.
# We will store data in a dictionary of dictionaries that looks like this:
# data = { '0000:7f:0d.0': { 'class': '0x088000',
# 'vendor': '0x8086',
# 'device': '0x0ee1' } }
# The key is location of PCI device on system in extended B/D/F notation
# and value is a dictionary containing filename:data pairs for each file
# returned by artifact collection for that PCI device.
# Extended B/D/F is of form "domain:bus:device.function". Compile a regex
# so we can use it to skip parsing files that don't match it.
hc = r"[0-9A-Fa-f]"
bdf_regex = re.compile(r"^%s+:%s+:%s+\.%s+" % (hc, hc, hc, hc))
# This will make sure that when a non-existing 'key' (PCI location)
# is accessed for the first time a new 'key':{} pair is auto-created
data = collections.defaultdict(dict)
for stat, file_obj in zip(stats, file_objects):
filename = stat.pathspec.Basename()
# Location of PCI device is the name of parent directory of returned file.
bdf = stat.pathspec.Dirname().Basename()
# Make sure we only parse files that are under a valid B/D/F folder
if bdf_regex.match(bdf):
# Remove newlines from all files except config. Config contains raw data
# so we don't want to touch it even if it has a newline character.
file_data = file_obj.read(100000)
if filename != "config":
file_data = file_data.rstrip("\n")
data[bdf][filename] = file_data
# Now that we've captured all information for each PCI device, convert
# the dictionary into a list of PCIDevice protos.
for bdf, bdf_filedata in data.iteritems():
pci_device = rdf_client.PCIDevice()
bdf_split = bdf.split(":")
df_split = bdf_split[2].split(".")
# We'll convert the hex into decimal to store in the protobuf.
pci_device.domain = int(bdf_split[0], 16)
pci_device.bus = int(bdf_split[1], 16)
pci_device.device = int(df_split[0], 16)
pci_device.function = int(df_split[1], 16)
pci_device.class_id = bdf_filedata.get("class")
pci_device.vendor = bdf_filedata.get("vendor")
pci_device.vendor_device_id = bdf_filedata.get("device")
pci_device.config = bdf_filedata.get("config")
yield pci_device
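# Illustrative sketch (not part of the original module): the extended B/D/F
# notation described above ("domain:bus:device.function", all fields hex) can
# be unpacked as below.  `_split_bdf` is a hypothetical helper used only for
# demonstration, e.g. _split_bdf("0000:7f:0d.0") -> (0, 127, 13, 0).
def _split_bdf(bdf):
  domain, bus, dev_fn = bdf.split(":")
  device, function = dev_fn.split(".")
  return int(domain, 16), int(bus, 16), int(device, 16), int(function, 16)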
class PasswdParser(parsers.FileParser):
"""Parser for passwd files. Yields User semantic values."""
output_types = ["User"]
supported_artifacts = ["UnixPasswd"]
@classmethod
def ParseLine(cls, index, line):
fields = "username,password,uid,gid,fullname,homedir,shell".split(",")
try:
if not line:
return
dat = dict(zip(fields, line.split(":")))
user = rdf_client.User(
username=dat["username"],
uid=int(dat["uid"]),
homedir=dat["homedir"],
shell=dat["shell"],
gid=int(dat["gid"]),
full_name=dat["fullname"])
return user
except (IndexError, KeyError):
raise parsers.ParseError("Invalid passwd file at line %d. %s" % (
(index + 1), line))
def Parse(self, stat, file_object, knowledge_base):
"""Parse the passwd file."""
_, _ = stat, knowledge_base
lines = [l.strip() for l in file_object.read(100000).splitlines()]
for index, line in enumerate(lines):
line = self.ParseLine(index, line)
if line:
yield line
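# Illustrative sketch (not part of the original module): how a canonical
# seven-field passwd line maps through ParseLine.  The demo function is
# hypothetical and never called by the parser; the account is made up.
def _demo_parse_passwd_line():
  line = "alice:x:1000:1000:Alice Example:/home/alice:/bin/bash"
  user = PasswdParser.ParseLine(0, line)
  # Expect user.username == "alice", user.uid == 1000 and
  # user.shell == "/bin/bash".
  return user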
class PasswdBufferParser(parsers.GrepParser):
"""Parser for lines grepped from passwd files."""
output_types = ["User"]
supported_artifacts = ["LinuxPasswdHomedirs", "NssCacheLinuxPasswdHomedirs"]
def Parse(self, filefinderresult, knowledge_base):
_ = knowledge_base
for index, line in enumerate([x.data for x in filefinderresult.matches]):
line = PasswdParser.ParseLine(index, line.strip())
if line:
yield line
class UtmpStruct(utils.Struct):
"""Parse wtmp file from utmp.h."""
_fields = [
("h", "ut_type"),
("i", "pid"),
("32s", "line"),
("4s", "id"),
("32s", "user"),
("256s", "host"),
("i", "exit"),
("i", "session"),
("i", "sec"),
("i", "usec"),
("i", "ip_1"),
("i", "ip_2"),
("i", "ip_3"),
("i", "ip_4"),
("20s", "nothing"),
]
class LinuxWtmpParser(parsers.FileParser):
"""Simplified parser for linux wtmp files.
Yields User semantic values for USER_PROCESS events.
"""
output_types = ["User"]
supported_artifacts = ["LinuxWtmp"]
def Parse(self, stat, file_object, knowledge_base):
"""Parse the wtmp file."""
_, _ = stat, knowledge_base
users = {}
wtmp = file_object.read(10000000)
while wtmp:
try:
record = UtmpStruct(wtmp)
except RuntimeError:
break
wtmp = wtmp[record.size:]
# Users only appear for USER_PROCESS events, others are system.
if record.ut_type != 7:
continue
# Lose the null termination
record.user = record.user.split("\x00", 1)[0]
# Store the latest login time.
# TODO(user): remove the 0 here once RDFDatetime can support times
# pre-epoch properly.
try:
users[record.user] = max(users[record.user], record.sec, 0)
except KeyError:
users[record.user] = record.sec
for user, last_login in users.iteritems():
yield rdf_client.User(
username=utils.SmartUnicode(user), last_logon=last_login * 1000000)
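# Illustrative sketch (not part of the original module): the UtmpStruct field
# list corresponds roughly to a struct-module format string.  Whether GRR's
# utils.Struct applies the same (native) alignment is an assumption of this
# demo, so the record size is computed rather than hard-coded.
def _demo_utmp_record_size():
  import struct
  fmt = "".join(f for f, _ in UtmpStruct._fields)
  return struct.calcsize(fmt)  # bytes per wtmp record under this layout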
class NetgroupParser(parsers.FileParser):
"""Parser that extracts users from a netgroup file."""
output_types = ["User"]
supported_artifacts = ["NetgroupConfiguration"]
# From useradd man page
USERNAME_REGEX = r"^[a-z_][a-z0-9_-]{0,30}[$]?$"
@classmethod
def ParseLines(cls, lines):
users = set()
filter_regexes = [
re.compile(x)
for x in config_lib.CONFIG["Artifacts.netgroup_filter_regexes"]
]
username_regex = re.compile(cls.USERNAME_REGEX)
blacklist = config_lib.CONFIG["Artifacts.netgroup_user_blacklist"]
for index, line in enumerate(lines):
if line.startswith("#"):
continue
splitline = line.split(" ")
group_name = splitline[0]
if filter_regexes:
filter_match = False
for regex in filter_regexes:
if regex.search(group_name):
filter_match = True
break
if not filter_match:
continue
for member in splitline[1:]:
if member.startswith("("):
try:
_, user, _ = member.split(",")
if user not in users and user not in blacklist:
if not username_regex.match(user):
yield rdf_anomaly.Anomaly(
type="PARSER_ANOMALY",
symptom="Invalid username: %s" % user)
else:
users.add(user)
yield rdf_client.User(username=utils.SmartUnicode(user))
except ValueError:
raise parsers.ParseError("Invalid netgroup file at line %d: %s" %
(index + 1, line))
def Parse(self, stat, file_object, knowledge_base):
"""Parse the netgroup file and return User objects.
Lines are of the form:
group1 (-,user1,) (-,user2,) (-,user3,)
Groups are ignored, we return users in lines that match the filter regexes,
or all users in the file if no filters are specified.
We assume usernames are in the default regex format specified in the adduser
man page. Notably no non-ASCII characters.
Args:
stat: unused statentry
file_object: netgroup VFSFile
knowledge_base: unused
Returns:
rdf_client.User
"""
_, _ = stat, knowledge_base
lines = [l.strip() for l in file_object.read(100000).splitlines()]
return self.ParseLines(lines)
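# Illustrative sketch (not part of the original module): how the
# "(host,user,domain)" triples in a netgroup line split apart.  This only
# demonstrates the string handling; ParseLines itself additionally applies
# the configured filter regexes, the username regex and the blacklist.
def _demo_netgroup_members(line="admins (-,alice,) (-,bob,)"):
  members = []
  for member in line.split(" ")[1:]:
    if member.startswith("("):
      _, user, _ = member.split(",")
      members.append(user)
  return members  # ['alice', 'bob']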
class NetgroupBufferParser(parsers.GrepParser):
"""Parser for lines grepped from /etc/netgroup files."""
output_types = ["User"]
def Parse(self, filefinderresult, knowledge_base):
_ = knowledge_base
return NetgroupParser.ParseLines(
[x.data.strip() for x in filefinderresult.matches])
class LinuxBaseShadowParser(parsers.FileParser):
"""Base parser to process user/groups with shadow files."""
# A list of hash types and hash matching expressions.
hashes = [("SHA512", re.compile(r"\$6\$[A-z\d\./]{0,16}\$[A-z\d\./]{86}$")),
("SHA256", re.compile(r"\$5\$[A-z\d\./]{0,16}\$[A-z\d\./]{43}$")),
("DISABLED", re.compile(r"!.*")), ("UNSET", re.compile(r"\*.*")),
("MD5", re.compile(r"\$1\$([A-z\d\./]{1,8}\$)?[A-z\d\./]{22}$")),
("DES", re.compile(r"[A-z\d\./]{2}.{11}$")),
("BLOWFISH", re.compile(r"\$2a?\$\d\d\$[A-z\d\.\/]{22}$")),
("NTHASH", re.compile(r"\$3\$")), ("UNUSED", re.compile(r"\$4\$"))]
# Prevents this from automatically registering.
__abstract = True # pylint: disable=g-bad-name
base_store = None
shadow_store = None
def __init__(self, *args, **kwargs):
super(LinuxBaseShadowParser, self).__init__(*args, **kwargs)
# Entries as defined by "getent", i.e. account databases used by nsswitch.
self.entry = {}
# Shadow files
self.shadow = {}
def GetPwStore(self, pw_attr):
"""Decide if the passwd field is a passwd or a reference to shadow.
Evaluates the contents of the password field to determine how the password
is stored.
- If blank either no password is required or no access is granted.
This behavior is system and application dependent.
- If 'x', the encrypted password is stored in /etc/shadow.
- Otherwise, the password is any other string, it's treated as an encrypted
password.
Args:
pw_attr: The password field as a string.
Returns:
An enum indicating the location of the password store.
"""
# PwEntry.PwStore enum values.
if pw_attr == "x":
return self.shadow_store
return self.base_store
def GetHashType(self, hash_str):
"""Identify the type of hash in a hash string.
Args:
hash_str: A string value that may be a hash.
Returns:
A string description of the type of hash.
"""
# Return the type of the first matching hash.
for hash_type, hash_re in self.hashes:
if hash_re.match(hash_str):
return hash_type
# No hash matched.
return "EMPTY"
def _ParseFile(self, file_obj, line_parser):
"""Process a file line by line.
Args:
file_obj: The file to parse.
line_parser: The parser method used to process and store line content.
Raises:
parser.ParseError if the parser is unable to process the line.
"""
lines = [l.strip() for l in file_obj.read(100000).splitlines()]
try:
for index, line in enumerate(lines):
if line:
line_parser(line)
except (IndexError, KeyError) as e:
raise parsers.ParseError("Invalid file at line %d: %s" % (index + 1, e))
def ReconcileShadow(self, store_type):
"""Verify that entries that claim to use shadow files have a shadow entry.
If the entries of the non-shadowed file indicate that a shadow file is used,
check that there is actually an entry for that file in shadow.
Args:
store_type: The type of password store that should be used (e.g.
/etc/shadow or /etc/gshadow)
"""
for k, v in self.entry.iteritems():
if v.pw_entry.store == store_type:
shadow_entry = self.shadow.get(k)
if shadow_entry:
v.pw_entry = shadow_entry
else:
v.pw_entry.store = "UNKNOWN"
def _Anomaly(self, msg, found):
return rdf_anomaly.Anomaly(
type="PARSER_ANOMALY", symptom=msg, finding=found)
@staticmethod
def MemberDiff(data1, set1_name, data2, set2_name):
"""Helper method to perform bidirectional set differences."""
set1 = set(data1)
set2 = set(data2)
diffs = []
msg = "Present in %s, missing in %s: %s"
if set1 != set2:
in_set1 = set1 - set2
in_set2 = set2 - set1
if in_set1:
diffs.append(msg % (set1_name, set2_name, ",".join(in_set1)))
if in_set2:
diffs.append(msg % (set2_name, set1_name, ",".join(in_set2)))
return diffs
def ParseMultiple(self, stats, file_objs, kb):
"""Process files together."""
fileset = {stat.pathspec.path: obj for stat, obj in zip(stats, file_objs)}
return self.ParseFileset(fileset)
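# Illustrative sketch (not part of the original module): MemberDiff reports
# differences in both directions, which is what the anomaly checks in the
# subclasses below rely on.  Only the static method is used, so no parser
# state is needed; the account names are made up.
def _demo_member_diff():
  return LinuxBaseShadowParser.MemberDiff(
      ["root", "alice"], "passwd", ["root", "bob"], "shadow")
  # -> ["Present in passwd, missing in shadow: alice",
  #     "Present in shadow, missing in passwd: bob"]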
class LinuxSystemGroupParser(LinuxBaseShadowParser):
"""Parser for group files. Yields Group semantic values."""
output_types = ["Group"]
supported_artifacts = ["LoginPolicyConfiguration"]
process_together = True
base_store = "GROUP"
shadow_store = "GSHADOW"
def __init__(self, *args, **kwargs):
super(LinuxSystemGroupParser, self).__init__(*args, **kwargs)
self.gshadow_members = {}
def ParseGshadowEntry(self, line):
"""Extract the members of each group from /etc/gshadow.
Identifies the groups in /etc/gshadow and several attributes of the group,
including how the password is crypted (if set).
gshadow files have the format group_name:passwd:admins:members
admins are both group members and can manage passwords and memberships.
Args:
line: An entry in gshadow.
"""
fields = ("name", "passwd", "administrators", "members")
if line:
rslt = dict(zip(fields, line.split(":")))
# Add the shadow state to the internal store.
name = rslt["name"]
pw_entry = self.shadow.setdefault(name, rdf_client.PwEntry())
pw_entry.store = self.shadow_store
pw_entry.hash_type = self.GetHashType(rslt["passwd"])
# Add the members to the internal store.
members = self.gshadow_members.setdefault(name, set())
for accts in rslt["administrators"], rslt["members"]:
if accts:
members.update(accts.split(","))
def ParseGroupEntry(self, line):
"""Extract the members of a group from /etc/group."""
fields = ("name", "passwd", "gid", "members")
if line:
rslt = dict(zip(fields, line.split(":")))
name = rslt["name"]
group = self.entry.setdefault(name, rdf_client.Group(name=name))
group.pw_entry.store = self.GetPwStore(rslt["passwd"])
if group.pw_entry.store == self.base_store:
group.pw_entry.hash_type = self.GetHashType(rslt["passwd"])
# If the group contains NIS entries, they may not have a gid.
if rslt["gid"]:
group.gid = int(rslt["gid"])
group.members = set(rslt["members"].split(","))
def MergeMembers(self):
"""Add shadow group members to the group if gshadow is used.
Normally group and shadow should be in sync, but no guarantees. Merges the
two stores as membership in either file may confer membership.
"""
for group_name, members in self.gshadow_members.iteritems():
group = self.entry.get(group_name)
if group and group.pw_entry.store == self.shadow_store:
group.members = members.union(group.members)
def FindAnomalies(self):
"""Identify any anomalous group attributes or memberships."""
for grp_name, group in self.entry.iteritems():
shadow = self.shadow.get(grp_name)
gshadows = self.gshadow_members.get(grp_name, [])
if shadow:
diff = self.MemberDiff(group.members, "group", gshadows, "gshadow")
if diff:
msg = "Group/gshadow members differ in group: %s" % grp_name
yield self._Anomaly(msg, diff)
diff = self.MemberDiff(self.entry, "group", self.gshadow_members, "gshadow")
if diff:
yield self._Anomaly("Mismatched group and gshadow files.", diff)
def ParseFileset(self, fileset=None):
"""Process linux system group and gshadow files.
Orchestrates collection of account entries from /etc/group and /etc/gshadow.
The group and gshadow entries are reconciled and member users are added to
the entry.
Args:
fileset: A dict of files mapped from path to an open file.
Yields:
- A series of Group entries, each of which is populated with group
memberships and indications of the shadow state of any group password.
- A series of anomalies in cases where there are mismatches between group
and gshadow states.
"""
# Get relevant shadow attributes.
gshadow = fileset.get("/etc/gshadow")
if gshadow:
self._ParseFile(gshadow, self.ParseGshadowEntry)
else:
logging.debug("No /etc/gshadow file.")
group = fileset.get("/etc/group")
if group:
self._ParseFile(group, self.ParseGroupEntry)
else:
logging.debug("No /etc/group file.")
self.ReconcileShadow(self.shadow_store)
# Identify any anomalous group/shadow entries.
# This needs to be done before memberships are merged: merged memberships
# are the *effective* membership regardless of weird configurations.
for anom in self.FindAnomalies():
yield anom
# Then add shadow group members to the group membership.
self.MergeMembers()
for group in self.entry.values():
yield group
class LinuxSystemPasswdParser(LinuxBaseShadowParser):
"""Parser for local accounts."""
output_types = ["User"]
supported_artifacts = ["LoginPolicyConfiguration"]
process_together = True
base_store = "PASSWD"
shadow_store = "SHADOW"
def __init__(self, *args, **kwargs):
super(LinuxSystemPasswdParser, self).__init__(*args, **kwargs)
self.groups = {} # Groups mapped by name.
self.memberships = {} # Group memberships per user.
self.uids = {} # Assigned uids
self.gids = {} # Assigned gids
def ParseShadowEntry(self, line):
"""Extract the user accounts in /etc/shadow.
Identifies the users in /etc/shadow and several attributes of their account,
including how their password is crypted and password aging characteristics.
Args:
line: An entry of the shadow file.
"""
fields = ("login", "passwd", "last_change", "min_age", "max_age",
"warn_time", "inactivity", "expire", "reserved")
if line:
rslt = dict(zip(fields, line.split(":")))
pw_entry = self.shadow.setdefault(rslt["login"], rdf_client.PwEntry())
pw_entry.store = self.shadow_store
pw_entry.hash_type = self.GetHashType(rslt["passwd"])
# Tread carefully here in case these values aren't set.
last_change = rslt.get("last_change")
if last_change:
pw_entry.age = int(last_change)
max_age = rslt.get("max_age")
if max_age:
pw_entry.max_age = int(max_age)
def ParsePasswdEntry(self, line):
"""Process the passwd entry fields and primary group memberships."""
fields = ("uname", "passwd", "uid", "gid", "fullname", "homedir", "shell")
if line:
rslt = dict(zip(fields, line.split(":")))
user = self.entry.setdefault(rslt["uname"], rdf_client.User())
user.username = rslt["uname"]
user.pw_entry.store = self.GetPwStore(rslt["passwd"])
if user.pw_entry.store == self.base_store:
user.pw_entry.hash_type = self.GetHashType(rslt["passwd"])
# If the passwd file contains NIS entries they may not have uid/gid set.
if rslt["uid"]:
user.uid = int(rslt["uid"])
if rslt["gid"]:
user.gid = int(rslt["gid"])
user.homedir = rslt["homedir"]
user.shell = rslt["shell"]
user.full_name = rslt["fullname"]
# Map uid numbers to detect duplicates.
uids = self.uids.setdefault(user.uid, set())
uids.add(user.username)
# Map primary group memberships to populate memberships.
gid = self.gids.setdefault(user.gid, set())
gid.add(user.username)
def _Members(self, group):
"""Unify members of a group and accounts with the group as primary gid."""
group.members = set(group.members).union(self.gids.get(group.gid, []))
return group
def AddGroupMemberships(self):
"""Adds aggregate group membership from group, gshadow and passwd."""
self.groups = {g.name: self._Members(g) for g in self.groups.itervalues()}
# Map the groups a user is a member of, irrespective of primary/extra gid.
for g in self.groups.itervalues():
for user in g.members:
membership = self.memberships.setdefault(user, set())
membership.add(g.gid)
# Now add the completed membership to the user account.
for user in self.entry.itervalues():
user.gids = self.memberships.get(user.username)
def FindAnomalies(self):
"""Identify anomalies in the password/shadow and group/gshadow data."""
# Find anomalous group entries.
findings = []
group_entries = {g.gid for g in self.groups.itervalues()}
for gid in set(self.gids) - group_entries:
undefined = ",".join(self.gids.get(gid, []))
findings.append("gid %d assigned without /etc/groups entry: %s" %
(gid, undefined))
if findings:
yield self._Anomaly("Accounts with invalid gid.", findings)
# Find any shared user IDs.
findings = []
for uid, names in self.uids.iteritems():
if len(names) > 1:
findings.append("uid %d assigned to multiple accounts: %s" %
(uid, ",".join(sorted(names))))
if findings:
yield self._Anomaly("Accounts with shared uid.", findings)
# Find privileged groups with unusual members.
findings = []
root_grp = self.groups.get("root")
if root_grp:
root_members = sorted([m for m in root_grp.members if m != "root"])
if root_members:
findings.append("Accounts in 'root' group: %s" % ",".join(root_members))
if findings:
yield self._Anomaly("Privileged group with unusual members.", findings)
# Find accounts without passwd/shadow entries.
diffs = self.MemberDiff(self.entry, "passwd", self.shadow, "shadow")
if diffs:
yield self._Anomaly("Mismatched passwd and shadow files.", diffs)
def AddPassword(self, fileset):
"""Add the passwd entries to the shadow store."""
passwd = fileset.get("/etc/passwd")
if passwd:
self._ParseFile(passwd, self.ParsePasswdEntry)
else:
logging.debug("No /etc/passwd file.")
def AddShadow(self, fileset):
"""Add the shadow entries to the shadow store."""
shadow = fileset.get("/etc/shadow")
if shadow:
self._ParseFile(shadow, self.ParseShadowEntry)
else:
logging.debug("No /etc/shadow file.")
def ParseFileset(self, fileset=None):
"""Process linux system login files.
Orchestrates collection of account entries from /etc/passwd and
/etc/shadow. The passwd and shadow entries are reconciled and group
memberships are mapped to the account.
Args:
fileset: A dict of files mapped from path to an open file.
Yields:
- A series of User entries, each of which is populated with
group memberships and indications of the shadow state of the account.
- A series of anomalies in cases where there are mismatches between passwd
and shadow state.
"""
self.AddPassword(fileset)
self.AddShadow(fileset)
self.ReconcileShadow(self.shadow_store)
# Get group memberships using the files that were already collected.
# Separate out groups and anomalies.
for rdf in LinuxSystemGroupParser().ParseFileset(fileset):
if isinstance(rdf, rdf_client.Group):
self.groups[rdf.name] = rdf
else:
yield rdf
self.AddGroupMemberships()
for user in self.entry.values():
yield user
for grp in self.groups.values():
yield grp
for anom in self.FindAnomalies():
yield anom
class PathParser(parsers.FileParser):
"""Parser for dotfile entries.
Extracts path attributes from dotfiles to infer effective paths for users.
This parser doesn't attempt or expect to determine path state for all cases,
rather, it is a best effort attempt to detect common misconfigurations. It is
not intended to detect maliciously obfuscated path modifications.
"""
output_types = ["AttributedDict"]
# TODO(user): Modify once a decision is made on contextual selection of
# parsed results for artifact data.
supported_artifacts = [
"GlobalShellConfigs", "RootUserShellConfigs", "UsersShellConfigs"
]
# https://cwe.mitre.org/data/definitions/426.html
_TARGETS = ("CLASSPATH", "LD_AOUT_LIBRARY_PATH", "LD_AOUT_PRELOAD",
"LD_LIBRARY_PATH", "LD_PRELOAD", "MODULE_PATH", "PATH",
"PERL5LIB", "PERLLIB", "PYTHONPATH", "RUBYLIB")
_SH_CONTINUATION = ("{", "}", "||", "&&", "export")
_CSH_FILES = (".login", ".cshrc", ".tcsh", "csh.cshrc", "csh.login",
"csh.logout")
# This matches "set a = (b . ../../.. )", "set a=(. b c)" etc.
_CSH_SET_RE = re.compile(r"(\w+)\s*=\s*\((.*)\)$")
# This matches $PATH, ${PATH}, "$PATH" and "${ PATH }" etc.
# Omits more fancy parameter expansion e.g. ${unset_val:=../..}
_SHELLVAR_RE = re.compile(r'"?\$\{?\s*(\w+)\s*\}?"?')
def __init__(self):
super(PathParser, self).__init__()
# Terminate entries on ";" to capture multiple values on one line.
self.parser = config_file.FieldParser(term=r"[\r\n;]")
def _ExpandPath(self, target, vals, paths):
"""Extract path information, interpolating current path values as needed."""
if target not in self._TARGETS:
return
expanded = []
for val in vals:
# Null entries specify the current directory, so :a::b:c: is equivalent
# to .:a:.:b:c:.
shellvar = self._SHELLVAR_RE.match(val)
if not val:
expanded.append(".")
elif shellvar:
# The value may actually be in braces as well. Always convert to upper
# case so we deal with stuff like lowercase csh path.
existing = paths.get(shellvar.group(1).upper())
if existing:
expanded.extend(existing)
else:
expanded.append(val)
else:
expanded.append(val)
paths[target] = expanded
def _ParseShVariables(self, lines):
"""Extract env_var and path values from sh derivative shells.
Iterates over each line, word by word searching for statements that set the
path. These are either variables, or conditions that would allow a variable
to be set later in the line (e.g. export).
Args:
lines: A list of lines, each of which is a list of space separated words.
Returns:
a dictionary of path names and values.
"""
paths = {}
for line in lines:
for entry in line:
if "=" in entry:
# Pad out the list so that it's always 2 elements, even if the split
# failed.
target, vals = (entry.split("=", 1) + [""])[:2]
if vals:
path_vals = vals.split(":")
else:
path_vals = []
self._ExpandPath(target, path_vals, paths)
elif entry not in self._SH_CONTINUATION:
# Stop processing the line unless the entry might allow paths to still
# be set, e.g.
# reserved words: "export"
# conditions: { PATH=VAL } && PATH=:$PATH || PATH=.
break
return paths
def _ParseCshVariables(self, lines):
"""Extract env_var and path values from csh derivative shells.
Path attributes can be set several ways:
- setenv takes the form "setenv PATH_NAME COLON:SEPARATED:LIST"
- set takes the form "set path_name=(space separated list)" and is
automatically exported for several types of files.
The first entry in each stanza is used to decide what context to use.
Other entries are used to identify the path name and any assigned values.
Args:
lines: A list of lines, each of which is a list of space separated words.
Returns:
a dictionary of path names and values.
"""
paths = {}
for line in lines:
if len(line) < 2:
continue
action = line[0]
if action == "setenv":
target = line[1]
path_vals = []
if line[2:]:
path_vals = line[2].split(":")
self._ExpandPath(target, path_vals, paths)
elif action == "set":
set_vals = self._CSH_SET_RE.search(" ".join(line[1:]))
if set_vals:
target, vals = set_vals.groups()
# Automatically exported to ENV vars.
if target in ("path", "term", "user"):
target = target.upper()
path_vals = vals.split()
self._ExpandPath(target, path_vals, paths)
return paths
def Parse(self, stat, file_obj, knowledge_base):
"""Identifies the paths set within a file.
Expands paths within the context of the file, but does not infer fully
expanded paths from external states. There are plenty of cases where path
attributes are unresolved, e.g. sourcing other files.
Lines are not handled literally. A field parser is used to:
- Break lines with multiple distinct statements into separate lines (e.g.
lines with a ';' separating stanzas).
- Strip out comments.
- Handle line continuations to capture multi-line configurations into one
statement.
Args:
stat: statentry
file_obj: VFSFile
knowledge_base: unused
Yields:
An attributed dict for each env vars. 'name' contains the path name, and
'vals' contains its vals.
"""
_ = knowledge_base
lines = self.parser.ParseEntries(file_obj.read())
if os.path.basename(stat.pathspec.path) in self._CSH_FILES:
paths = self._ParseCshVariables(lines)
else:
paths = self._ParseShVariables(lines)
for path_name, path_vals in paths.iteritems():
yield rdf_protodict.AttributedDict(
config=stat.pathspec.path, name=path_name, vals=path_vals)
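# Illustrative sketch (not part of the original module): how a simple sh-style
# assignment is expanded by the logic above.  Lines are handed to
# _ParseShVariables as lists of whitespace-separated words, and "$PATH" style
# references are only interpolated from values seen earlier in the same file.
# The demo is never called by the parser and assumes PathParser() can be
# constructed outside a full GRR deployment.
def _demo_sh_path_expansion():
  parser = PathParser()
  lines = [["PATH=/usr/local/bin:$PATH"], ["export", "PYTHONPATH=/opt/lib:"]]
  return parser._ParseShVariables(lines)
  # e.g. {"PATH": ["/usr/local/bin", "$PATH"],
  #       "PYTHONPATH": ["/opt/lib", "."]}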
|
py | 1a5208c897fa2fd3892fedc466984e83371257ed | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-07-06 22:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product_type', models.CharField(choices=[(b'VEGETABLE', b'VEGETABLE'), (b'FRUIT', b'FRUIT')], default=b'VEGETABLE', max_length=200)),
('quantity_type', models.CharField(choices=[(b'DOZEN', b'DOZEN'), (b'KILOGRAM', b'KILOGRAM')], default=b'KILOGRAM', max_length=200)),
],
),
]
|
py | 1a5209b941eac066d9c9f4d898eae5ed3ec88427 | import re
import string
import json
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from sklearn.feature_extraction import text
import sklearn.preprocessing as sk_prep
import nltk
from nltk.tokenize import TweetTokenizer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import stopwords
from .base import BaseTransformer
lem = WordNetLemmatizer()
tokenizer = TweetTokenizer()
nltk.download('wordnet')
nltk.download('stopwords')
eng_stopwords = set(stopwords.words("english"))
with open('external_data/apostrophes.json', 'r') as f:
APPO = json.load(f)
class WordListFilter(BaseTransformer):
def __init__(self, word_list_filepath):
self.word_set = self._read_data(word_list_filepath)
def transform(self, X):
X = self._transform(X)
return {'X': X}
def _transform(self, X):
X = pd.DataFrame(X, columns=['text']).astype(str)
X['text'] = X['text'].apply(self._filter_words)
return X['text'].values
def _filter_words(self, x):
x = x.lower()
x = ' '.join([w for w in x.split() if w in self.word_set])
return x
def _read_data(self, filepath):
with open(filepath, 'r+') as f:
data = f.read()
return set(data.split('\n'))
def load(self, filepath):
return self
def save(self, filepath):
joblib.dump({}, filepath)
class TextCleaner(BaseTransformer):
def __init__(self, drop_punctuation, drop_newline, drop_multispaces,
all_lower_case, fill_na_with, deduplication_threshold, anonymize, apostrophes, use_stopwords):
self.drop_punctuation = drop_punctuation
self.drop_newline = drop_newline
self.drop_multispaces = drop_multispaces
self.all_lower_case = all_lower_case
self.fill_na_with = fill_na_with
self.deduplication_threshold = deduplication_threshold
self.anonymize = anonymize
self.apostrophes = apostrophes
self.use_stopwords = use_stopwords
def transform(self, X):
X = pd.DataFrame(X, columns=['text']).astype(str)
X['text'] = X['text'].apply(self._transform)
if self.fill_na_with:
X['text'] = X['text'].fillna(self.fill_na_with).values
return {'X': X['text'].values}
def _transform(self, x):
if self.all_lower_case:
x = self._lower(x)
if self.drop_punctuation:
x = self._remove_punctuation(x)
if self.drop_newline:
x = self._remove_newline(x)
if self.drop_multispaces:
x = self._substitute_multiple_spaces(x)
if self.deduplication_threshold is not None:
x = self._deduplicate(x)
if self.anonymize:
x = self._anonymize(x)
if self.apostrophes:
x = self._apostrophes(x)
if self.use_stopwords:
x = self._use_stopwords(x)
return x
def _use_stopwords(self, x):
words = tokenizer.tokenize(x)
words = [w for w in words if not w in eng_stopwords]
x = " ".join(words)
return x
def _apostrophes(self, x):
words = tokenizer.tokenize(x)
words = [APPO[word] if word in APPO else word for word in words]
words = [lem.lemmatize(word, "v") for word in words]
words = [w for w in words if not w in eng_stopwords]
x = " ".join(words)
return x
def _anonymize(self, x):
# Remove leaky elements like IP addresses and usernames.
x = re.sub("\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", x)
# removing usernames
x = re.sub("\[\[.*\]", " ", x)
return x
def _lower(self, x):
return x.lower()
def _remove_punctuation(self, x):
return re.sub(r'[^\w\s]', ' ', x)
def _remove_newline(self, x):
x = x.replace('\n', ' ')
x = x.replace('\n\n', ' ')
return x
def _substitute_multiple_spaces(self, x):
return ' '.join(x.split())
def _deduplicate(self, x):
word_list = x.split()
num_words = len(word_list)
if num_words == 0:
return x
else:
num_unique_words = len(set(word_list))
unique_ratio = num_words / num_unique_words
if unique_ratio > self.deduplication_threshold:
x = ' '.join(x.split()[:num_unique_words])
return x
def load(self, filepath):
params = joblib.load(filepath)
self.drop_punctuation = params['drop_punctuation']
self.all_lower_case = params['all_lower_case']
self.fill_na_with = params['fill_na_with']
return self
def save(self, filepath):
params = {'drop_punctuation': self.drop_punctuation,
'all_lower_case': self.all_lower_case,
'fill_na_with': self.fill_na_with,
}
joblib.dump(params, filepath)
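# Illustrative usage sketch (not part of the original module): wiring the
# cleaner up by hand.  The flag values below are arbitrary choices for the
# demo, not recommended defaults, and the function is never called here.
def _demo_text_cleaner():
    cleaner = TextCleaner(drop_punctuation=True, drop_newline=True,
                          drop_multispaces=True, all_lower_case=True,
                          fill_na_with=' ', deduplication_threshold=10,
                          anonymize=True, apostrophes=False,
                          use_stopwords=False)
    # Roughly: 'Hello,\n  WORLD!!' -> 'hello world'
    return cleaner.transform(np.array(["Hello,\n  WORLD!!"]))['X']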
class XYSplit(BaseTransformer):
def __init__(self, x_columns, y_columns):
self.x_columns = x_columns
self.y_columns = y_columns
def transform(self, meta, train_mode):
X = meta[self.x_columns].values
if train_mode:
y = meta[self.y_columns].values
else:
y = None
return {'X': X,
'y': y}
def load(self, filepath):
params = joblib.load(filepath)
self.columns_to_get = params['x_columns']
self.target_columns = params['y_columns']
return self
def save(self, filepath):
params = {'x_columns': self.x_columns,
'y_columns': self.y_columns
}
joblib.dump(params, filepath)
class TfidfVectorizer(BaseTransformer):
def __init__(self, **kwargs):
self.vectorizer = text.TfidfVectorizer(**kwargs)
def fit(self, text):
self.vectorizer.fit(text)
return self
def transform(self, text):
return {'features': self.vectorizer.transform(text)}
def load(self, filepath):
self.vectorizer = joblib.load(filepath)
return self
def save(self, filepath):
joblib.dump(self.vectorizer, filepath)
class TextCounter(BaseTransformer):
def transform(self, X):
X = pd.DataFrame(X, columns=['text']).astype(str)
X = X['text'].apply(self._transform)
X['caps_vs_length'] = X.apply(lambda row: float(row['upper_case_count']) / float(row['char_count']), axis=1)
X['num_symbols'] = X['text'].apply(lambda comment: sum(comment.count(w) for w in '*&$%'))
X['num_words'] = X['text'].apply(lambda comment: len(comment.split()))
X['num_unique_words'] = X['text'].apply(lambda comment: len(set(w for w in comment.split())))
X['words_vs_unique'] = X['num_unique_words'] / X['num_words']
X['mean_word_len'] = X['text'].apply(lambda x: np.mean([len(w) for w in str(x).split()]))
X.drop('text', axis=1, inplace=True)
X.fillna(0.0, inplace=True)
return {'X': X}
def _transform(self, x):
features = {}
features['text'] = x
features['char_count'] = char_count(x)
features['word_count'] = word_count(x)
features['punctuation_count'] = punctuation_count(x)
features['upper_case_count'] = upper_case_count(x)
features['lower_case_count'] = lower_case_count(x)
features['digit_count'] = digit_count(x)
features['space_count'] = space_count(x)
features['newline_count'] = newline_count(x)
return pd.Series(features)
def load(self, filepath):
return self
def save(self, filepath):
joblib.dump({}, filepath)
class Normalizer(BaseTransformer):
def __init__(self):
self.normalizer = sk_prep.Normalizer()
def fit(self, X):
self.normalizer.fit(X)
return self
def transform(self, X):
X = self.normalizer.transform(X)
return {'X': X}
def load(self, filepath):
self.normalizer = joblib.load(filepath)
return self
def save(self, filepath):
joblib.dump(self.normalizer, filepath)
class MinMaxScaler(BaseTransformer):
def __init__(self):
self.minmax_scaler = sk_prep.MinMaxScaler()
def fit(self, X):
self.minmax_scaler.fit(X)
return self
def transform(self, X):
X = self.minmax_scaler.transform(X)
return {'X': X}
def load(self, filepath):
self.minmax_scaler = joblib.load(filepath)
return self
def save(self, filepath):
joblib.dump(self.minmax_scaler, filepath)
class MinMaxScalerMultilabel(BaseTransformer):
def __init__(self):
self.minmax_scalers = []
def fit(self, X):
for i in range(X.shape[1]):
minmax_scaler = sk_prep.MinMaxScaler()
minmax_scaler.fit(X[:, i, :])
self.minmax_scalers.append(minmax_scaler)
return self
def transform(self, X):
for i, minmax_scaler in enumerate(self.minmax_scalers):
X[:, i, :] = minmax_scaler.transform(X[:, i, :])
return {'X': X}
def load(self, filepath):
self.minmax_scalers = joblib.load(filepath)
return self
def save(self, filepath):
joblib.dump(self.minmax_scalers, filepath)
def char_count(x):
return len(x)
def word_count(x):
return len(x.split())
def newline_count(x):
return x.count('\n')
def upper_case_count(x):
return sum(c.isupper() for c in x)
def lower_case_count(x):
return sum(c.islower() for c in x)
def digit_count(x):
return sum(c.isdigit() for c in x)
def space_count(x):
return sum(c.isspace() for c in x)
def punctuation_count(x):
return occurence(x, string.punctuation)
def occurence(s1, s2):
return sum([1 for x in s1 if x in s2])
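# Illustrative sketch (not part of the original module): the counting helpers
# above are plain functions, so they can be sanity-checked directly.  Guarded
# so nothing runs on import.
if __name__ == '__main__':
    sample = 'Hello World 42!\n'
    print(char_count(sample))         # 16
    print(word_count(sample))         # 3
    print(upper_case_count(sample))   # 2
    print(digit_count(sample))        # 2
    print(punctuation_count(sample))  # 1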
|
py | 1a520aabc7fb414a5dc946a69f6ae7b7c70ad744 | from django.urls import path
from .views import login_view, register_user
from django.contrib.auth.views import LogoutView
urlpatterns = [
path('login/', login_view, name="login"),
path('register/', register_user, name="register"),
path("logout/", LogoutView.as_view(), name="logout")
] |
py | 1a520b749f25d43eb694fe6b0ead3dea3a707082 | from math import radians
import numpy as np
import geopandas as gpd
import pandas as pd
from shapely.geometry import Point
from sklearn.cluster import DBSCAN
from tqdm import tqdm
from trackintel.geogr.distances import meters_to_decimal_degrees
def generate_locations(
staypoints,
method="dbscan",
epsilon=100,
num_samples=1,
distance_metric="haversine",
agg_level="user",
print_progress=False,
):
"""
Generate locations from the staypoints.
Parameters
----------
staypoints : GeoDataFrame (as trackintel staypoints)
The staypoints have to follow the standard definition for staypoints DataFrames.
method : {'dbscan'}
Method to create locations.
- 'dbscan' : Uses the DBSCAN algorithm to cluster staypoints.
epsilon : float, default 100
The epsilon for the 'dbscan' method. if 'distance_metric' is 'haversine'
or 'euclidean', the unit is in meters.
num_samples : int, default 1
The minimal number of samples in a cluster.
distance_metric: {'haversine', 'euclidean'}
        The distance metric used by the applied method. Any metric listed at the link below is possible:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html
agg_level: {'user','dataset'}
The level of aggregation when generating locations:
- 'user' : locations are generated independently per-user.
- 'dataset' : shared locations are generated for all users.
print_progress : bool, default False
If print_progress is True, the progress bar is displayed
Returns
-------
ret_sp: GeoDataFrame (as trackintel staypoints)
The original staypoints with a new column ``[`location_id`]``.
ret_loc: GeoDataFrame (as trackintel locations)
The generated locations.
Examples
--------
>>> stps.as_staypoints.generate_locations(method='dbscan', epsilon=100, num_samples=1)
"""
if agg_level not in ["user", "dataset"]:
raise AttributeError("The parameter agg_level must be one of ['user', 'dataset'].")
if method not in ["dbscan"]:
raise AttributeError("The parameter method must be one of ['dbscan'].")
# initialize the return GeoDataFrames
ret_stps = staypoints.copy()
ret_stps = ret_stps.sort_values(["user_id", "started_at"])
geo_col = ret_stps.geometry.name
if method == "dbscan":
if distance_metric == "haversine":
            # The input and output of sklearn's haversine metric are both in radians,
# see https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.haversine_distances.html
# here the 'epsilon' is directly applied to the metric's output.
            # convert epsilon from meters to radians
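            # e.g. with epsilon=100 (meters), eps = 100 / 6371000 ~ 1.57e-5 radians,
            # where 6371000 m is the mean Earth radius used in the DBSCAN call below.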
db = DBSCAN(eps=epsilon / 6371000, min_samples=num_samples, algorithm="ball_tree", metric=distance_metric)
else:
db = DBSCAN(eps=epsilon, min_samples=num_samples, algorithm="ball_tree", metric=distance_metric)
if agg_level == "user":
if print_progress:
tqdm.pandas(desc="User location generation")
ret_stps = ret_stps.groupby("user_id", as_index=False).progress_apply(
_generate_locations_per_user,
geo_col=geo_col,
distance_metric=distance_metric,
db=db,
)
else:
ret_stps = ret_stps.groupby("user_id", as_index=False).apply(
_generate_locations_per_user,
geo_col=geo_col,
distance_metric=distance_metric,
db=db,
)
# keeping track of noise labels
ret_stps_non_noise_labels = ret_stps[ret_stps["location_id"] != -1]
ret_stps_noise_labels = ret_stps[ret_stps["location_id"] == -1]
# sort so that the last location id of a user = max(location id)
ret_stps_non_noise_labels = ret_stps_non_noise_labels.sort_values(["user_id", "location_id"])
# identify start positions of new user_ids
start_of_user_id = ret_stps_non_noise_labels["user_id"] != ret_stps_non_noise_labels["user_id"].shift(1)
# calculate the offset (= last location id of the previous user)
# multiplication is to mask all positions where no new user starts and addition is to have a +1 when a
# new user starts
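            # e.g. if user A has location_ids [0, 1, 2] and user B has [0, 1],
            # the cumulative offset shifts B's labels to [3, 4], keeping ids unique across users.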
loc_id_offset = ret_stps_non_noise_labels["location_id"].shift(1) * start_of_user_id + start_of_user_id
# fill first nan with 0 and create the cumulative sum
loc_id_offset = loc_id_offset.fillna(0).cumsum()
ret_stps_non_noise_labels["location_id"] = ret_stps_non_noise_labels["location_id"] + loc_id_offset
ret_stps = gpd.GeoDataFrame(pd.concat([ret_stps_non_noise_labels, ret_stps_noise_labels]), geometry=geo_col)
ret_stps.sort_values(["user_id", "started_at"], inplace=True)
else:
if distance_metric == "haversine":
                # the input is converted to a list of (lat, lon) tuples in radians
p = np.array([[radians(g.y), radians(g.x)] for g in ret_stps.geometry])
else:
p = np.array([[g.x, g.y] for g in ret_stps.geometry])
labels = db.fit_predict(p)
ret_stps["location_id"] = labels
### create locations as grouped staypoints
temp_sp = ret_stps[["user_id", "location_id", ret_stps.geometry.name]]
if agg_level == "user":
# directly dissolve by 'user_id' and 'location_id'
ret_loc = temp_sp.dissolve(by=["user_id", "location_id"], as_index=False)
else:
## generate user-location pairs with same geometries across users
# get user-location pairs
ret_loc = temp_sp.dissolve(by=["user_id", "location_id"], as_index=False).drop(
columns={temp_sp.geometry.name}
)
# get location geometries
geom_df = temp_sp.dissolve(by=["location_id"], as_index=False).drop(columns={"user_id"})
# merge pairs with location geometries
ret_loc = ret_loc.merge(geom_df, on="location_id", how="left")
# filter stps not belonging to locations
ret_loc = ret_loc.loc[ret_loc["location_id"] != -1]
ret_loc["center"] = None # initialize
    # locations with only one staypoint are of type "Point"
point_idx = ret_loc.geom_type == "Point"
if not ret_loc.loc[point_idx].empty:
ret_loc.loc[point_idx, "center"] = ret_loc.loc[point_idx, ret_loc.geometry.name]
    # locations with multiple staypoints are of type "MultiPoint"
if not ret_loc.loc[~point_idx].empty:
ret_loc.loc[~point_idx, "center"] = ret_loc.loc[~point_idx, ret_loc.geometry.name].apply(
lambda p: Point(np.array(p)[:, 0].mean(), np.array(p)[:, 1].mean())
)
# extent is the convex hull of the geometry
ret_loc["extent"] = None # initialize
if not ret_loc.empty:
ret_loc["extent"] = ret_loc[ret_loc.geometry.name].apply(lambda p: p.convex_hull)
# convex_hull of one point would be a Point and two points a Linestring,
# we change them into Polygon by creating a buffer of epsilon around them.
pointLine_idx = (ret_loc["extent"].geom_type == "LineString") | (ret_loc["extent"].geom_type == "Point")
if not ret_loc.loc[pointLine_idx].empty:
# Perform meter to decimal conversion if the distance metric is haversine
if distance_metric == "haversine":
ret_loc.loc[pointLine_idx, "extent"] = ret_loc.loc[pointLine_idx].apply(
lambda p: p["extent"].buffer(meters_to_decimal_degrees(epsilon, p["center"].y)), axis=1
)
else:
ret_loc.loc[pointLine_idx, "extent"] = ret_loc.loc[pointLine_idx].apply(
lambda p: p["extent"].buffer(epsilon), axis=1
)
ret_loc = ret_loc.set_geometry("center")
ret_loc = ret_loc[["user_id", "location_id", "center", "extent"]]
# index management
ret_loc.rename(columns={"location_id": "id"}, inplace=True)
ret_loc.set_index("id", inplace=True)
# stps not linked to a location receive np.nan in 'location_id'
ret_stps.loc[ret_stps["location_id"] == -1, "location_id"] = np.nan
## dtype consistency
# locs id (generated by this function) should be int64
ret_loc.index = ret_loc.index.astype("int64")
# location_id of stps can only be in Int64 (missing values)
ret_stps["location_id"] = ret_stps["location_id"].astype("Int64")
# user_id of ret_loc should be the same as ret_stps
ret_loc["user_id"] = ret_loc["user_id"].astype(ret_stps["user_id"].dtype)
return ret_stps, ret_loc
def _generate_locations_per_user(user_staypoints, distance_metric, db, geo_col):
"""function called after groupby: should only contain records of one user;
see generate_locations() function for parameter meaning."""
if distance_metric == "haversine":
        # the input is converted to a list of (lat, lon) tuples in radians
p = np.array([[radians(q.y), radians(q.x)] for q in (user_staypoints[geo_col])])
else:
p = np.array([[q.x, q.y] for q in (user_staypoints[geo_col])])
labels = db.fit_predict(p)
# add staypoint - location matching to original staypoints
user_staypoints["location_id"] = labels
user_staypoints = gpd.GeoDataFrame(user_staypoints, geometry=geo_col)
return user_staypoints
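# A minimal usage sketch (assumption: `stps` is a trackintel staypoints
# GeoDataFrame loaded elsewhere, e.g. from a staypoints CSV file):
#
#     stps, locs = generate_locations(
#         stps, method="dbscan", epsilon=100, num_samples=1, agg_level="user"
#     )
#     locs[["center", "extent"]].head()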
|
py | 1a520c5758d8bf430348752b8e0ffc5c4c612545 | #
# PySNMP MIB module Dell-SSL (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Dell-SSL
# Produced by pysmi-0.3.4 at Mon Apr 29 18:41:57 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint")
rnd, = mibBuilder.importSymbols("Dell-MIB", "rnd")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Bits, Gauge32, iso, Integer32, Counter32, Unsigned32, Counter64, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, ModuleIdentity, ObjectIdentity, MibIdentifier, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Gauge32", "iso", "Integer32", "Counter32", "Unsigned32", "Counter64", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "ModuleIdentity", "ObjectIdentity", "MibIdentifier", "TimeTicks")
TruthValue, DisplayString, TextualConvention, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "TextualConvention", "RowStatus")
rlSsl = ModuleIdentity((1, 3, 6, 1, 4, 1, 89, 100))
rlSsl.setRevisions(('2003-09-21 00:00',))
if mibBuilder.loadTexts: rlSsl.setLastUpdated('200309210000Z')
if mibBuilder.loadTexts: rlSsl.setOrganization('Dell')
rlSslCertificateGenerationTable = MibTable((1, 3, 6, 1, 4, 1, 89, 100, 1), )
if mibBuilder.loadTexts: rlSslCertificateGenerationTable.setStatus('current')
rlSslCertificateGenerationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 100, 1, 1), ).setIndexNames((0, "Dell-SSL", "rlSslCertificateGenerationIndex"))
if mibBuilder.loadTexts: rlSslCertificateGenerationEntry.setStatus('current')
rlSslCertificateGenerationIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 1, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateGenerationIndex.setStatus('current')
rlSslCertificateGenerationId = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 1, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateGenerationId.setStatus('current')
rlSslCertificateGenerationCountryName = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateGenerationCountryName.setStatus('current')
rlSslCertificateGenerationStateOrProvinceName = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 1, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateGenerationStateOrProvinceName.setStatus('current')
rlSslCertificateGenerationLocalityName = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 1, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateGenerationLocalityName.setStatus('current')
rlSslCertificateGenerationOrganizationName = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 1, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateGenerationOrganizationName.setStatus('current')
rlSslCertificateGenerationOrganizationUnitName = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 1, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateGenerationOrganizationUnitName.setStatus('current')
rlSslCertificateGenerationCommonName = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 1, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateGenerationCommonName.setStatus('current')
rlSslCertificateGenerationValidDays = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 1, 1, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateGenerationValidDays.setStatus('current')
rlSslCertificateGenerationRsaKeyLength = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(512, 2048))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateGenerationRsaKeyLength.setStatus('current')
rlSslCertificateGenerationPassphrase = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 1, 1, 11), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateGenerationPassphrase.setStatus('current')
rlSslCertificateGenerationAction = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("generateRsaKeyAndSelfSignedCertificate", 1), ("generateSelfSignedCertificate", 2), ("generatePkcs12", 3), ("generateCertificateRequest", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateGenerationAction.setStatus('current')
rlSslCertificateExportTable = MibTable((1, 3, 6, 1, 4, 1, 89, 100, 2), )
if mibBuilder.loadTexts: rlSslCertificateExportTable.setStatus('current')
rlSslCertificateExportEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 100, 2, 1), ).setIndexNames((0, "Dell-SSL", "rlSslCertificateExportId"), (0, "Dell-SSL", "rlSslCertificateExportType"), (0, "Dell-SSL", "rlSslCertificateExportFragmentId"))
if mibBuilder.loadTexts: rlSslCertificateExportEntry.setStatus('current')
rlSslCertificateExportId = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlSslCertificateExportId.setStatus('current')
rlSslCertificateExportType = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("certificateRequestPemFormat", 1), ("certificatePemFormat", 2), ("certificateOpenSslFormat", 3), ("certificateAndKeyPkcs12", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlSslCertificateExportType.setStatus('current')
rlSslCertificateExportFragmentId = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlSslCertificateExportFragmentId.setStatus('current')
rlSslCertificateExportFragmentText = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 2, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlSslCertificateExportFragmentText.setStatus('current')
rlSslCertificateSave = MibScalar((1, 3, 6, 1, 4, 1, 89, 100, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateSave.setStatus('current')
rlSslCertificateSaveFormat = MibScalar((1, 3, 6, 1, 4, 1, 89, 100, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("x509", 1), ("pkcs12", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateSaveFormat.setStatus('current')
rlSslImportedPKCS12CertificatePassphrase = MibScalar((1, 3, 6, 1, 4, 1, 89, 100, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(8, 96))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslImportedPKCS12CertificatePassphrase.setStatus('current')
rlSslCertificateImportTable = MibTable((1, 3, 6, 1, 4, 1, 89, 100, 6), )
if mibBuilder.loadTexts: rlSslCertificateImportTable.setStatus('current')
rlSslCertificateImportEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 100, 6, 1), ).setIndexNames((0, "Dell-SSL", "rlSslCertificateImportId"), (0, "Dell-SSL", "rlSslCertificateImportFormat"), (0, "Dell-SSL", "rlSslCertificateImportFragmentId"))
if mibBuilder.loadTexts: rlSslCertificateImportEntry.setStatus('current')
rlSslCertificateImportId = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 6, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateImportId.setStatus('current')
rlSslCertificateImportFormat = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("x509", 1), ("pkcs12", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateImportFormat.setStatus('current')
rlSslCertificateImportFragmentId = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 6, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateImportFragmentId.setStatus('current')
rlSslCertificateImportFragmentText = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 6, 1, 4), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateImportFragmentText.setStatus('current')
rlSslCertificateImportFragmentStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 6, 1, 5), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateImportFragmentStatus.setStatus('current')
rlSslSSLv2Enable = MibScalar((1, 3, 6, 1, 4, 1, 89, 100, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslSSLv2Enable.setStatus('current')
class RlSslPublicKeyAlgorithm(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("rsa", 1), ("dsa", 2))
rlSslImportExportSelfKeyTable = MibTable((1, 3, 6, 1, 4, 1, 89, 100, 8), )
if mibBuilder.loadTexts: rlSslImportExportSelfKeyTable.setStatus('current')
rlSslImportExportSelfKeyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 100, 8, 1), ).setIndexNames((0, "Dell-SSL", "rlSslImportExportSelfKeyFormat"), (0, "Dell-SSL", "rlSslImportExportSelfKeyIndex"), (0, "Dell-SSL", "rlSslImportExportSelfKeyFragmentId"))
if mibBuilder.loadTexts: rlSslImportExportSelfKeyEntry.setStatus('current')
rlSslImportExportSelfKeyFormat = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 8, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("uuencoded-format", 1))))
if mibBuilder.loadTexts: rlSslImportExportSelfKeyFormat.setStatus('current')
rlSslImportExportSelfKeyIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 8, 1, 2), Integer32())
if mibBuilder.loadTexts: rlSslImportExportSelfKeyIndex.setStatus('current')
rlSslImportExportSelfKeyFragmentId = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 8, 1, 3), Integer32())
if mibBuilder.loadTexts: rlSslImportExportSelfKeyFragmentId.setStatus('current')
rlSslImportExportSelfKeyAlgorithm = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 8, 1, 4), RlSslPublicKeyAlgorithm()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslImportExportSelfKeyAlgorithm.setStatus('current')
rlSslImportExportSelfKeyFragmentText = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 100, 8, 1, 5), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslImportExportSelfKeyFragmentText.setStatus('current')
rlSslCertificateSave2 = MibScalar((1, 3, 6, 1, 4, 1, 89, 100, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslCertificateSave2.setStatus('current')
rlSslisCertificate1Default = MibScalar((1, 3, 6, 1, 4, 1, 89, 100, 10), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslisCertificate1Default.setStatus('current')
rlSslisCertificate2Default = MibScalar((1, 3, 6, 1, 4, 1, 89, 100, 11), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSslisCertificate2Default.setStatus('current')
mibBuilder.exportSymbols("Dell-SSL", rlSslisCertificate1Default=rlSslisCertificate1Default, rlSslCertificateGenerationOrganizationName=rlSslCertificateGenerationOrganizationName, rlSslCertificateImportFormat=rlSslCertificateImportFormat, rlSslCertificateSave2=rlSslCertificateSave2, PYSNMP_MODULE_ID=rlSsl, rlSslCertificateGenerationCommonName=rlSslCertificateGenerationCommonName, rlSslImportExportSelfKeyFragmentId=rlSslImportExportSelfKeyFragmentId, RlSslPublicKeyAlgorithm=RlSslPublicKeyAlgorithm, rlSslImportedPKCS12CertificatePassphrase=rlSslImportedPKCS12CertificatePassphrase, rlSslImportExportSelfKeyTable=rlSslImportExportSelfKeyTable, rlSslImportExportSelfKeyEntry=rlSslImportExportSelfKeyEntry, rlSslCertificateGenerationPassphrase=rlSslCertificateGenerationPassphrase, rlSslCertificateSaveFormat=rlSslCertificateSaveFormat, rlSslisCertificate2Default=rlSslisCertificate2Default, rlSslCertificateExportType=rlSslCertificateExportType, rlSslCertificateGenerationCountryName=rlSslCertificateGenerationCountryName, rlSslImportExportSelfKeyAlgorithm=rlSslImportExportSelfKeyAlgorithm, rlSslCertificateGenerationRsaKeyLength=rlSslCertificateGenerationRsaKeyLength, rlSslCertificateImportId=rlSslCertificateImportId, rlSslSSLv2Enable=rlSslSSLv2Enable, rlSslCertificateExportTable=rlSslCertificateExportTable, rlSslCertificateImportFragmentId=rlSslCertificateImportFragmentId, rlSslCertificateGenerationEntry=rlSslCertificateGenerationEntry, rlSslCertificateGenerationOrganizationUnitName=rlSslCertificateGenerationOrganizationUnitName, rlSslImportExportSelfKeyFragmentText=rlSslImportExportSelfKeyFragmentText, rlSslCertificateGenerationTable=rlSslCertificateGenerationTable, rlSslCertificateGenerationId=rlSslCertificateGenerationId, rlSslCertificateImportEntry=rlSslCertificateImportEntry, rlSslCertificateGenerationAction=rlSslCertificateGenerationAction, rlSslCertificateExportId=rlSslCertificateExportId, rlSslCertificateImportFragmentText=rlSslCertificateImportFragmentText, rlSslCertificateGenerationIndex=rlSslCertificateGenerationIndex, rlSslCertificateImportTable=rlSslCertificateImportTable, rlSslCertificateImportFragmentStatus=rlSslCertificateImportFragmentStatus, rlSsl=rlSsl, rlSslCertificateExportFragmentText=rlSslCertificateExportFragmentText, rlSslCertificateSave=rlSslCertificateSave, rlSslCertificateGenerationStateOrProvinceName=rlSslCertificateGenerationStateOrProvinceName, rlSslImportExportSelfKeyIndex=rlSslImportExportSelfKeyIndex, rlSslCertificateGenerationLocalityName=rlSslCertificateGenerationLocalityName, rlSslCertificateGenerationValidDays=rlSslCertificateGenerationValidDays, rlSslCertificateExportEntry=rlSslCertificateExportEntry, rlSslCertificateExportFragmentId=rlSslCertificateExportFragmentId, rlSslImportExportSelfKeyFormat=rlSslImportExportSelfKeyFormat)
|
py | 1a520c8ef0f7b47b912461490c919449aa9226af | import os
import sys
sys.path.append("../../../../monk_v1/");
sys.path.append("../../../monk/");
import psutil
from keras_prototype import prototype
from compare_prototype import compare
from common import print_start
from common import print_status
import tensorflow as tf
if(tf.__version__[0] == '2'):
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
def test_layer_average_pooling1d(system_dict):
forward = True;
test = "test_layer_average_pooling1d";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
gtf = prototype(verbose=0);
gtf.Prototype("sample-project-1", "sample-experiment-1");
network = [];
network.append(gtf.average_pooling1d(kernel_size=3));
gtf.Compile_Network(network, data_shape=(3, 32), use_gpu=False);
x = tf.placeholder(tf.float32, shape=(1, 32, 3))
y = gtf.system_dict["local"]["model"](x);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
return system_dict
|
py | 1a520cab6f679308ed9b49f64f80640c4342c9dc | from fontTools.misc.py23 import bytesjoin, strjoin, tobytes, tostr
from fontTools.misc.textTools import safeEval
from fontTools.misc import sstruct
from . import DefaultTable
import base64
DSIG_HeaderFormat = """
> # big endian
ulVersion: L
usNumSigs: H
usFlag: H
"""
# followed by an array of usNumSigs DSIG_Signature records
DSIG_SignatureFormat = """
> # big endian
ulFormat: L
ulLength: L # length includes DSIG_SignatureBlock header
ulOffset: L
"""
# followed by an array of usNumSigs DSIG_SignatureBlock records,
# each followed immediately by the pkcs7 bytes
DSIG_SignatureBlockFormat = """
> # big endian
usReserved1: H
usReserved2: H
cbSignature: l # length of following raw pkcs7 data
"""
#
# NOTE
# the DSIG table format allows for SignatureBlocks residing
# anywhere in the table and possibly in a different order as
# listed in the array after the first table header
#
# this implementation does not keep track of any gaps and/or data
# before or after the actual signature blocks while decompiling,
# and puts them in the same physical order as listed in the header
# on compilation with no padding whatsoever.
#
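# For reference, the fixed-size parts defined above are: the table header is
# 8 bytes (L + H + H), each signature record is 12 bytes (3 x L), and each
# signature block header is 8 bytes (H + H + l), followed by cbSignature bytes
# of raw pkcs7 data.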
class table_D_S_I_G_(DefaultTable.DefaultTable):
def decompile(self, data, ttFont):
dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self)
assert self.ulVersion == 1, "DSIG ulVersion must be 1"
assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0"
self.signatureRecords = sigrecs = []
for n in range(self.usNumSigs):
sigrec, newData = sstruct.unpack2(DSIG_SignatureFormat, newData, SignatureRecord())
assert sigrec.ulFormat == 1, "DSIG signature record #%d ulFormat must be 1" % n
sigrecs.append(sigrec)
		for n, sigrec in enumerate(sigrecs):
			dummy, newData = sstruct.unpack2(DSIG_SignatureBlockFormat, data[sigrec.ulOffset:], sigrec)
			assert sigrec.usReserved1 == 0, "DSIG signature record #%d usReserved1 must be 0" % n
			assert sigrec.usReserved2 == 0, "DSIG signature record #%d usReserved2 must be 0" % n
			sigrec.pkcs7 = newData[:sigrec.cbSignature]
def compile(self, ttFont):
packed = sstruct.pack(DSIG_HeaderFormat, self)
headers = [packed]
offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat)
data = []
for sigrec in self.signatureRecords:
# first pack signature block
sigrec.cbSignature = len(sigrec.pkcs7)
packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7
data.append(packed)
# update redundant length field
sigrec.ulLength = len(packed)
# update running table offset
sigrec.ulOffset = offset
headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec))
offset += sigrec.ulLength
if offset % 2:
# Pad to even bytes
data.append(b'\0')
return bytesjoin(headers+data)
def toXML(self, xmlWriter, ttFont):
xmlWriter.comment("note that the Digital Signature will be invalid after recompilation!")
xmlWriter.newline()
xmlWriter.simpletag("tableHeader", version=self.ulVersion, numSigs=self.usNumSigs, flag="0x%X" % self.usFlag)
for sigrec in self.signatureRecords:
xmlWriter.newline()
sigrec.toXML(xmlWriter, ttFont)
xmlWriter.newline()
def fromXML(self, name, attrs, content, ttFont):
if name == "tableHeader":
self.signatureRecords = []
self.ulVersion = safeEval(attrs["version"])
self.usNumSigs = safeEval(attrs["numSigs"])
self.usFlag = safeEval(attrs["flag"])
return
if name == "SignatureRecord":
sigrec = SignatureRecord()
sigrec.fromXML(name, attrs, content, ttFont)
self.signatureRecords.append(sigrec)
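# pem_spam filters out the PEM armour lines and blank lines, keeping only the
# base64 payload lines when fromXML() parses the content back into pkcs7 bytes.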
pem_spam = lambda l, spam = {
"-----BEGIN PKCS7-----": True, "-----END PKCS7-----": True, "": True
}: not spam.get(l.strip())
def b64encode(b):
s = base64.b64encode(b)
# Line-break at 76 chars.
items = []
while s:
items.append(tostr(s[:76]))
items.append('\n')
s = s[76:]
return strjoin(items)
class SignatureRecord(object):
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.__dict__)
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, format=self.ulFormat)
writer.newline()
writer.write_noindent("-----BEGIN PKCS7-----\n")
writer.write_noindent(b64encode(self.pkcs7))
writer.write_noindent("-----END PKCS7-----\n")
writer.endtag(self.__class__.__name__)
def fromXML(self, name, attrs, content, ttFont):
self.ulFormat = safeEval(attrs["format"])
self.usReserved1 = safeEval(attrs.get("reserved1", "0"))
self.usReserved2 = safeEval(attrs.get("reserved2", "0"))
self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content))))
|
py | 1a520ce6c863aea88c370f2cbf9dc3b61cdfb52b | # Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/Python/Advanced/color_map_optimization.py
import open3d as o3d
from cto.utility.logging_extension import logger
from cto.visualization import visualize_intermediate_result
from cto.config_api import create_config
from cto.config_api import get_ofp
from cto.color_optimization import color_map_optimization
from cto.data_parsing.reconstruction_parsing import import_reconstruction
if __name__ == "__main__":
# http://www.open3d.org/docs/release/tutorial/Advanced/color_map_optimization.html
logger.vinfo('o3d.__version__', o3d.__version__)
o3d.utility.set_verbosity_level(
o3d.utility.VerbosityLevel.Debug)
config = create_config()
mesh_textured_max_iter_x_ofp = get_ofp(config)
rgbd_images, camera_trajectory, mesh, depth_range = import_reconstruction(config)
visualize_intermediate_result(rgbd_images, camera_trajectory, mesh, config)
color_map_optimization(
mesh,
rgbd_images, # are used to compute gradient images
camera_trajectory,
ofp=mesh_textured_max_iter_x_ofp,
config=config,
depth_range=depth_range)
|
py | 1a520dab98767d22ebb0d9aac76b8fc51eb21a1c | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import CampaignCriterionServiceTransport
from .grpc import CampaignCriterionServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[CampaignCriterionServiceTransport]]
_transport_registry['grpc'] = CampaignCriterionServiceGrpcTransport
__all__ = (
'CampaignCriterionServiceTransport',
'CampaignCriterionServiceGrpcTransport',
)
|
py | 1a520ddda5194b737e4a2cc53079de9b64ca4621 | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def detail_url(recipe_id):
"""Return recipe details URL"""
return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main course'):
"""Create nd returrn a sample tag"""
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
"""Create and return a sample ingredient"""
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
"""Create and return a sample recipe"""
defaults = {
'title': 'Simple recipe',
'time_minutes': 10,
'price': 5.00
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
class PublicRecipeApiTests(TestCase):
"""Test unauthenticated recie API access"""
def setUp(self):
self.client = APIClient()
def test_auth_required(self):
"""Test unauthenticated recipe API access"""
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
"""Test unauthenticated recipe API access"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_recipes(self):
"""Test retrieving a list of recipes"""
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
"""Test retrieving recipes for user"""
user2 = get_user_model().objects.create_user(
'[email protected]',
'password123'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
"""Test viewing a recipe detail"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
"""Test creating recipe"""
payload = {
'title': 'Chocolate cheesecake',
'time_minutes': 30,
'price': 5.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
for key in payload.keys():
self.assertEqual(payload[key], getattr(recipe, key))
def test_create_recipe_with_tags(self):
"""Test creating a recipe with tags"""
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Dessert')
payload = {
'title': 'Avocado lime cheesecake',
'tags': [tag1.id, tag2.id],
'time_minutes': 60,
'price': 20.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_with_ingredients(self):
"""Test creating recipe with ingredients"""
ingredient1 = sample_ingredient(user=self.user, name='Prawns')
ingredient2 = sample_ingredient(user=self.user, name='Ginger')
payload = {
'title': 'Thai prawn red curry',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 20,
'price': 7.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
def test_partial_update_recipe(self):
"""Test updating a recipe with patch"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user=self.user, name='Curry')
payload = {'title': 'Chicken tikka', 'tags': [new_tag.id]}
url = detail_url(recipe.id)
self.client.patch(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
    def test_full_update_recipe(self):
"""Test updating a recipe with put"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
payload = {
'title': 'Spaghetti carbonara',
'time_minutes': 25,
'price': 5.00
}
url = detail_url(recipe.id)
self.client.put(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
|
py | 1a520f3ebdc728556f73ad6cea8810059111f638 | from sqlalchemy import desc, func
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import and_
from app.models import Fund, Holding, News, Trades
def get_etf_profile(db: Session, symbol: str):
return db.query(Fund).filter(Fund.symbol == symbol).one()
def get_etf_current_holdings(db: Session, symbols: str, limit: int):
subq = (
db.query(
Holding.fund,
func.max(Holding.date).label("maxdate"),
)
.filter(
Holding.fund.in_([s for s in symbols]),
)
.group_by(Holding.fund)
.subquery()
)
q = (
db.query(
Holding.fund,
Holding.date,
Holding.ticker,
Holding.company,
Holding.cusip,
Holding.shares,
Holding.market_value,
Holding.share_price,
Holding.weight,
Holding.weight_rank,
)
.join(
subq,
and_(
Holding.fund == subq.c.fund,
Holding.date == subq.c.maxdate,
),
)
.order_by(
Holding.date,
Holding.fund,
Holding.weight_rank,
)
)
if limit:
return q.order_by("date", "weight_rank").limit(limit).all()
else:
return q.all()
def get_etf_holdings(
db: Session, symbols: str, date_from: str, date_to: str, limit: int
):
q = (
db.query(
Holding.fund,
Holding.date,
Holding.ticker,
Holding.company,
Holding.cusip,
Holding.shares,
Holding.market_value,
Holding.share_price,
Holding.weight,
Holding.weight_rank,
)
.filter(
Holding.fund.in_([s for s in symbols]),
Holding.date >= date_from,
Holding.date <= date_to,
)
.order_by(
Holding.date,
Holding.fund,
Holding.weight_rank,
)
)
if limit:
return q.order_by("date", "weight_rank").limit(limit).all()
else:
return q.all()
def get_etf_holdings_dates(db: Session, symbols: str):
return (
db.query(
func.max(Holding.date).label("maxdate"),
)
.filter(Holding.fund.in_([s for s in symbols]))
.group_by(Holding.fund)
.all()
)
def get_etf_trades(
db: Session, symbols: str, start_date: str, end_date: str, limit: int
):
q = (
db.query(
Trades.fund,
Trades.date,
Trades.ticker,
Trades.company,
Trades.direction,
Trades.cusip,
Trades.shares,
Trades.etf_percent,
)
.filter(
Trades.fund.in_([s for s in symbols]),
Trades.date >= start_date,
Trades.date <= end_date,
)
.order_by(
Trades.date,
Trades.fund,
Trades.etf_percent.desc(),
)
)
if limit:
return q.limit(limit).all()
else:
return q.all()
def get_etf_trades_dates(db: Session, symbols: str):
return (
db.query(
func.min(Trades.date).label("mindate"),
func.max(Trades.date).label("maxdate"),
)
.filter(Trades.fund.in_([s for s in symbols]))
.one()
)
def get_etf_trades_maxdate(db: Session):
return (
db.query(
func.max(Trades.date).label("maxdate"),
).one()
)[0]
def get_stock_fundownership_distinct_dates(
db: Session, symbol: str, date_from: str, date_to: str
):
return (
db.query(Holding.date)
.filter(
Holding.ticker == symbol,
Holding.date >= date_from,
Holding.date <= date_to,
)
.distinct()
)
def get_stock_fundownership(db: Session, symbol: str, date: str):
return (
db.query(Holding)
.filter(
Holding.ticker == symbol,
Holding.date == date,
)
.all()
)
def get_stock_fundownership_dates(db: Session, symbol: str):
return (
db.query(
func.min(Holding.date).label("mindate"),
func.max(Holding.date).label("maxdate"),
)
.filter(Holding.ticker == symbol)
.first()
)
def get_stock_trades(
db: Session, symbol: str, direction: str, date_from: str, date_to: str, limit: int
):
if direction:
q = (
db.query(Trades)
.filter(
Trades.ticker == symbol,
Trades.direction == direction.capitalize(),
Trades.date >= date_from,
Trades.date <= date_to,
)
.order_by(Trades.date.desc(), Trades.fund)
)
else:
q = (
db.query(Trades)
.filter(
Trades.ticker == symbol,
Trades.date >= date_from,
Trades.date <= date_to,
)
.order_by(Trades.date.desc(), Trades.fund)
)
if limit:
return q.limit(limit).all()
else:
return q.all()
def get_stock_trades_dates(db: Session, symbol: str):
return (
db.query(
func.min(Trades.date).label("mindate"),
func.max(Trades.date).label("maxdate"),
)
.filter(Trades.ticker == symbol)
.one()
)
def get_etf_news(db: Session, symbols: str, date_from: str, date_to: str, limit: int):
return (
db.query(News)
.filter(
News.category == "etf",
News.datetime >= date_from,
News.datetime <= date_to,
News.related.in_([s for s in symbols]),
)
.order_by(desc("datetime"))
.limit(limit)
.all()
)
def get_etf_news_min_date(db: Session, symbols: str):
return (
db.query(
func.min(News.datetime).label("mindate"),
)
.filter(
News.related.in_([s for s in symbols]),
News.category == "etf",
)
.one()
)[0]
|
py | 1a520f8e5f9179ad29b8bc3d5ebe2b21a0a654b1 | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import enum
import math
import re
import warnings
from collections import OrderedDict
from copy import copy
from functools import partial
from pathlib import Path
class ConfigError(ValueError):
pass
class BaseValidator:
def __init__(self, on_error=None, additional_validator=None):
self.on_error = on_error
self.additional_validator = additional_validator
self.field_uri = None
def validate(self, entry, field_uri=None):
field_uri = field_uri or self.field_uri
if self.additional_validator and not self.additional_validator(entry, field_uri):
self.raise_error(entry, field_uri)
def raise_error(self, value, field_uri, reason=None):
if self.on_error:
self.on_error(value, field_uri, reason)
error_message = 'Invalid value "{value}" for {field_uri}'.format(value=value, field_uri=field_uri)
if reason:
error_message = '{error_message}: {reason}'.format(error_message=error_message, reason=reason)
        raise ConfigError(error_message)
class _ExtraArgumentBehaviour(enum.Enum):
WARN = 'warn'
IGNORE = 'ignore'
ERROR = 'error'
_WARN_ON_EXTRA_ARGUMENT = _ExtraArgumentBehaviour.WARN
_ERROR_ON_EXTRA_ARGUMENT = _ExtraArgumentBehaviour.ERROR
_IGNORE_ON_EXTRA_ARGUMENT = _ExtraArgumentBehaviour.IGNORE
def _is_dict_like(entry):
return hasattr(entry, '__iter__') and hasattr(entry, '__getitem__')
class ConfigValidator(BaseValidator):
WARN_ON_EXTRA_ARGUMENT = _WARN_ON_EXTRA_ARGUMENT
ERROR_ON_EXTRA_ARGUMENT = _ERROR_ON_EXTRA_ARGUMENT
IGNORE_ON_EXTRA_ARGUMENT = _IGNORE_ON_EXTRA_ARGUMENT
def __init__(self, config_uri, on_extra_argument=_WARN_ON_EXTRA_ARGUMENT, **kwargs):
super().__init__(**kwargs)
self.on_extra_argument = _ExtraArgumentBehaviour(on_extra_argument)
self._fields = OrderedDict()
self.field_uri = config_uri
for name in dir(self):
value = getattr(self, name)
if not isinstance(value, BaseField):
continue
field_copy = copy(value)
field_copy.field_uri = "{}.{}".format(config_uri, name)
self._fields[name] = field_copy
def validate(self, entry, field_uri=None):
super().validate(entry, field_uri)
field_uri = field_uri or self.field_uri
if not _is_dict_like(entry):
raise ConfigError("{} is expected to be dict-like".format(field_uri))
extra_arguments = []
for key in entry:
if key not in self._fields:
extra_arguments.append(key)
continue
self._fields[key].validate(entry[key])
required_fields = set(field_name for field_name, field_value in self._fields.items()
if not field_value.optional)
missing_arguments = required_fields.difference(entry)
if missing_arguments:
self.raise_error(entry, field_uri,
"Invalid config for {}: missing required fields: {}".format(field_uri, ', '.join(
map(str, missing_arguments))))
if extra_arguments:
unknown_options_error = "specifies unknown options: {}".format(extra_arguments)
message = "{} {}".format(field_uri, unknown_options_error)
if self.on_extra_argument == _ExtraArgumentBehaviour.WARN:
warnings.warn(message)
if self.on_extra_argument == _ExtraArgumentBehaviour.ERROR:
self.raise_error(entry, field_uri, message)
@property
def known_fields(self):
return set(self._fields)
def raise_error(self, value, field_uri, reason=None):
if self.on_error:
self.on_error(value, field_uri, reason)
else:
raise ConfigError(reason)
class BaseField(BaseValidator):
def __init__(self, optional=False, allow_none=False, **kwargs):
super().__init__(**kwargs)
self.optional = optional
self.allow_none = allow_none
def validate(self, entry, field_uri=None):
super().validate(entry, field_uri)
field_uri = field_uri or self.field_uri
if not self.allow_none and entry is None:
raise ConfigError("{} is not allowed to be None".format(field_uri))
class StringField(BaseField):
def __init__(self, choices=None, regex=None, case_sensitive=False, **kwargs):
super().__init__(**kwargs)
self.choices = choices if case_sensitive or not choices else list(map(str.lower, choices))
self.regex = re.compile(regex, flags=re.IGNORECASE if not case_sensitive else 0) if regex else None
self.case_sensitive = case_sensitive
def validate(self, entry, field_uri=None):
super().validate(entry, field_uri)
if entry is None:
return
field_uri = field_uri or self.field_uri
source_entry = entry
if not isinstance(entry, str):
raise ConfigError("{} is expected to be str".format(source_entry))
if not self.case_sensitive:
entry = entry.lower()
if self.choices and entry not in self.choices:
reason = "unsupported option, expected one of: {}".format(', '.join(map(str, self.choices)))
self.raise_error(source_entry, field_uri, reason)
if self.regex and not self.regex.match(entry):
self.raise_error(source_entry, field_uri, reason=None)
class DictField(BaseField):
def __init__(self, key_type=None, value_type=None, validate_keys=True, validate_values=True, allow_empty=True,
**kwargs):
super().__init__(**kwargs)
self.validate_keys = validate_keys if key_type else False
self.validate_values = validate_values if value_type else False
self.key_type = _get_field_type(key_type)
self.value_type = _get_field_type(value_type)
self.allow_empty = allow_empty
def validate(self, entry, field_uri=None):
super().validate(entry, field_uri)
if entry is None:
return
field_uri = field_uri or self.field_uri
if not isinstance(entry, dict):
raise ConfigError("{} is expected to be dict".format(field_uri))
if not entry and not self.allow_empty:
self.raise_error(entry, field_uri, "value is empty")
for k, v in entry.items():
if self.validate_keys:
uri = "{}.keys.{}".format(field_uri, k)
self.key_type.validate(k, uri)
if self.validate_values:
uri = "{}.{}".format(field_uri, k)
self.value_type.validate(v, uri)
class ListField(BaseField):
def __init__(self, value_type=None, validate_values=True, allow_empty=True, **kwargs):
super().__init__(**kwargs)
self.validate_values = validate_values if value_type else False
self.value_type = _get_field_type(value_type)
self.allow_empty = allow_empty
def validate(self, entry, field_uri=None):
super().validate(entry, field_uri)
if entry is None:
return
if not isinstance(entry, list):
raise ConfigError("{} is expected to be list".format(field_uri))
if not entry and not self.allow_empty:
self.raise_error(entry, field_uri, "value is empty")
if self.validate_values:
for i, val in enumerate(entry):
self.value_type.validate(val, "{}[{}]".format(val, i))
class NumberField(BaseField):
def __init__(self, floats=True, min_value=None, max_value=None, allow_inf=False, allow_nan=False, **kwargs):
super().__init__(**kwargs)
self.floats = floats
self.min = min_value
self.max = max_value
self.allow_inf = allow_inf
self.allow_nan = allow_nan
def validate(self, entry, field_uri=None):
super().validate(entry, field_uri)
if entry is None:
return
field_uri = field_uri or self.field_uri
if not self.floats and isinstance(entry, float):
raise ConfigError("{} is expected to be int".format(field_uri))
if not isinstance(entry, int) and not isinstance(entry, float):
raise ConfigError("{} is expected to be number".format(field_uri))
if self.min is not None and entry < self.min:
reason = "value is less than minimal allowed - {}".format(self.min)
self.raise_error(entry, field_uri, reason)
if self.max is not None and entry > self.max:
reason = "value is greater than maximal allowed - {}".format(self.max)
self.raise_error(entry, field_uri, reason)
if math.isinf(entry) and not self.allow_inf:
self.raise_error(entry, field_uri, "value is infinity")
if math.isnan(entry) and not self.allow_nan:
self.raise_error(entry, field_uri, "value is NaN")
class PathField(BaseField):
def __init__(self, check_exists=False, is_directory=None, **kwargs):
super().__init__(**kwargs)
self.check_exists = check_exists
self.is_directory = is_directory
def validate(self, entry, field_uri=None):
super().validate(entry, field_uri)
if entry is None:
return
field_uri = field_uri or self.field_uri
try:
path = Path(entry)
except TypeError:
self.raise_error(entry, field_uri, "values is expected to be path-like")
if self.check_exists and not path.exists():
self.raise_error(entry, field_uri, "path does not exist")
else:
if self.is_directory and not path.is_dir():
self.raise_error(entry, field_uri, "is not a directory")
if self.is_directory is False and not path.is_file():
self.raise_error(entry, field_uri, "is a directory, regular file expected")
class BoolField(BaseField):
def validate(self, entry, field_uri=None):
super().validate(entry, field_uri)
if entry is None:
return
field_uri = field_uri or self.field_uri
if not isinstance(entry, bool):
raise ConfigError("{} is expected to be bool".format(field_uri))
def _get_field_type(key_type):
if not isinstance(key_type, BaseField):
type_ = _TYPE_TO_FIELD_CLASS.get(key_type)
if callable(type_):
return type_()
return key_type
_TYPE_TO_FIELD_CLASS = {
int: partial(NumberField, floats=False),
float: partial(NumberField, floats=True),
dict: partial(DictField, validate_keys=False, validate_values=False),
list: partial(ListField, validate_values=False),
Path: PathField,
str: StringField,
bool: BoolField,
}
|
py | 1a520fe025543f583039e934274ef74f1d59570b | # coding: utf-8
import pprint
import re
import six
class ListScalingTagInfosByResourceIdRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'resource_type': 'str',
'resource_id': 'str'
}
attribute_map = {
'resource_type': 'resource_type',
'resource_id': 'resource_id'
}
def __init__(self, resource_type=None, resource_id=None):
"""ListScalingTagInfosByResourceIdRequest - a model defined in huaweicloud sdk"""
self._resource_type = None
self._resource_id = None
self.discriminator = None
self.resource_type = resource_type
self.resource_id = resource_id
@property
def resource_type(self):
"""Gets the resource_type of this ListScalingTagInfosByResourceIdRequest.
:return: The resource_type of this ListScalingTagInfosByResourceIdRequest.
:rtype: str
"""
return self._resource_type
@resource_type.setter
def resource_type(self, resource_type):
"""Sets the resource_type of this ListScalingTagInfosByResourceIdRequest.
:param resource_type: The resource_type of this ListScalingTagInfosByResourceIdRequest.
:type: str
"""
self._resource_type = resource_type
@property
def resource_id(self):
"""Gets the resource_id of this ListScalingTagInfosByResourceIdRequest.
:return: The resource_id of this ListScalingTagInfosByResourceIdRequest.
:rtype: str
"""
return self._resource_id
@resource_id.setter
def resource_id(self, resource_id):
"""Sets the resource_id of this ListScalingTagInfosByResourceIdRequest.
:param resource_id: The resource_id of this ListScalingTagInfosByResourceIdRequest.
:type: str
"""
self._resource_id = resource_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListScalingTagInfosByResourceIdRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a52105e597a0f5b3308138bd5b2aa4555973f2a | from BTcrypto import Crypto
from BT1.Encrypter import protocol_name
default_task_id = []
class SingleRawServer:
def __init__(self, info_hash, multihandler, doneflag, protocol):
self.info_hash = info_hash
self.doneflag = doneflag
self.protocol = protocol
self.multihandler = multihandler
self.rawserver = multihandler.rawserver
self.finished = False
self.running = False
self.handler = None
self.taskqueue = []
def shutdown(self):
if not self.finished:
self.multihandler.shutdown_torrent(self.info_hash)
def _shutdown(self):
if not self.finished:
self.finished = True
self.running = False
self.rawserver.kill_tasks(self.info_hash)
if self.handler:
self.handler.close_all()
def _external_connection_made(self, c, options, already_read,
encrypted=None):
if self.running:
c.set_handler(self.handler)
self.handler.externally_handshaked_connection_made(
c, options, already_read, encrypted=encrypted)
### RawServer functions ###
def add_task(self, func, delay=0, id=default_task_id):
if id is default_task_id:
id = self.info_hash
if not self.finished:
self.rawserver.add_task(func, delay, id)
# def bind(self, port, bind = '', reuse = False):
# pass # not handled here
def start_connection(self, dns, handler=None):
if not handler:
handler = self.handler
c = self.rawserver.start_connection(dns, handler)
return c
# def listen_forever(self, handler):
# pass # don't call with this
def start_listening(self, handler):
self.handler = handler
self.running = True
return self.shutdown # obviously, doesn't listen forever
def is_finished(self):
return self.finished
def get_exception_flag(self):
return self.rawserver.get_exception_flag()
class NewSocketHandler: # hand a new socket off where it belongs
def __init__(self, multihandler, connection):
self.multihandler = multihandler
self.connection = connection
connection.set_handler(self)
self.closed = False
self.buffer = ''
self.complete = False
self.read = self._read
self.write = connection.write
self.next_len = 1 + len(protocol_name)
self.next_func = self.read_header
self.multihandler.rawserver.add_task(self._auto_close, 30)
def _auto_close(self):
if not self.complete:
self.close()
def close(self):
if not self.closed:
self.connection.close()
self.closed = True
# copied from Encrypter and modified
def _read_header(self, s):
if s == chr(len(protocol_name)) + protocol_name:
self.protocol = protocol_name
return 8, self.read_options
return None
def read_header(self, s):
if self._read_header(s):
if self.multihandler.config['crypto_only']:
return None
return 8, self.read_options
if not self.multihandler.config['crypto_allowed']:
return None
self.encrypted = True
self.encrypter = Crypto(False)
self._write_buffer(s)
return self.encrypter.keylength, self.read_crypto_header
def read_crypto_header(self, s):
self.encrypter.received_key(s)
self.write(self.encrypter.pubkey + self.encrypter.padding())
self._max_search = 520
return 0, self.read_crypto_block3a
def _search_for_pattern(self, s, pat):
p = s.find(pat)
if p < 0:
self._max_search -= len(s) + 1 - len(pat)
if self._max_search < 0:
self.close()
return False
self._write_buffer(s[1 - len(pat):])
return False
self._write_buffer(s[p + len(pat):])
return True
def read_crypto_block3a(self, s):
if not self._search_for_pattern(s, self.encrypter.block3a):
return -1, self.read_crypto_block3a # wait for more data
return 20, self.read_crypto_block3b
def read_crypto_block3b(self, s):
srs = self.multihandler.singlerawservers
for k in srs:
if self.encrypter.test_skey(s, k):
srs[k]._external_connection_made(
self.connection, None, self.buffer,
encrypted=self.encrypter)
return True
return None
def read_options(self, s):
self.options = s
return 20, self.read_download_id
def read_download_id(self, s):
srs = self.multihandler.singlerawservers
if s in srs:
if srs[s].protocol == self.protocol:
srs[s]._external_connection_made(
self.connection, self.options, self.buffer)
return True
return None
def read_dead(self, s):
return None
def data_came_in(self, garbage, s):
self.read(s)
def _write_buffer(self, s):
self.buffer = s + self.buffer
def _read(self, s):
self.buffer += s
while True:
if self.closed:
return
# self.next_len = # of characters function expects
# or 0 = all characters in the buffer
# or -1 = wait for next read, then all characters in the buffer
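            # e.g. read_header returns (8, self.read_options): wait for 8 more
            # bytes of options and then hand them to read_options.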
if self.next_len <= 0:
m = self.buffer
self.buffer = ''
elif len(self.buffer) >= self.next_len:
m = self.buffer[:self.next_len]
self.buffer = self.buffer[self.next_len:]
else:
return
try:
x = self.next_func(m)
except:
self.next_len, self.next_func = 1, self.read_dead
raise
if x is None:
self.close()
return
if x:
self.complete = True
return
self.next_len, self.next_func = x
if self.next_len < 0: # already checked buffer
return # wait for additional data
def connection_flushed(self, ss):
pass
def connection_lost(self, ss):
self.closed = True
class MultiHandler:
def __init__(self, rawserver, doneflag, config):
self.rawserver = rawserver
self.masterdoneflag = doneflag
self.config = config
self.singlerawservers = {}
self.connections = {}
self.taskqueues = {}
def newRawServer(self, info_hash, doneflag, protocol=protocol_name):
new = SingleRawServer(info_hash, self, doneflag, protocol)
self.singlerawservers[info_hash] = new
return new
def shutdown_torrent(self, info_hash):
self.singlerawservers[info_hash]._shutdown()
del self.singlerawservers[info_hash]
def listen_forever(self):
self.rawserver.listen_forever(self)
for srs in self.singlerawservers.itervalues():
srs.finished = True
srs.running = False
srs.doneflag.set()
### RawServer handler functions ###
# be wary of name collisions
def external_connection_made(self, ss):
NewSocketHandler(self, ss)
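if __name__ == '__main__':
    # Minimal self-contained sketch (illustrative; the framing class below is an
    # assumption, not part of the original module) of the next_len / next_func
    # protocol that NewSocketHandler._read implements: each reader returns
    # (bytes_expected, next_reader) until the handshake is complete.
    class _FramingDemo(object):
        def __init__(self):
            self.buffer = ''
            self.next_len, self.next_func = 4, self.read_length_prefix
        def read_length_prefix(self, s):
            return int(s), self.read_payload      # next: read int(s) bytes of payload
        def read_payload(self, s):
            print('payload: %r' % s)
            return 1, self.read_dead              # park the connection afterwards
        def read_dead(self, s):
            return 1, self.read_dead
        def data_came_in(self, s):
            self.buffer += s
            while len(self.buffer) >= self.next_len:
                m, self.buffer = self.buffer[:self.next_len], self.buffer[self.next_len:]
                self.next_len, self.next_func = self.next_func(m)
    _FramingDemo().data_came_in('0005hello')      # prints: payload: 'hello'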
|
py | 1a521079028892cfef78c5aea8ea9de216b1ce0f | from flask import Flask
from application.config.config import config
def create_app():
app = Flask(__name__)
app.config.from_object(config)
return app
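# Usage sketch (illustrative; the route below is an assumption and not part of the
# original factory -- it only shows how create_app() is typically consumed):
if __name__ == "__main__":
    app = create_app()
    @app.route("/health")
    def health():
        return "ok"
    app.run(debug=True)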
|
py | 1a5210bf9ff61e32fd44b28f3890d977471892f3 | # coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
import datetime
import threading
import time
import pandas as pd
from QUANTAXIS.QAUtil.QALogs import QA_util_log_info
def QA_util_time_now():
"""
    Return the current time.
    :return: datetime.datetime
"""
return datetime.datetime.now()
def QA_util_date_today():
"""
    Return the current date.
    :return: datetime.date
"""
return datetime.date.today()
def QA_util_today_str():
"""
    Return today's date as a string.
    :return: str, e.g. 2011-11-11
"""
dt = QA_util_date_today()
return QA_util_datetime_to_strdate(dt)
def QA_util_date_str2int(date):
"""
    Convert a date string such as '2011-09-11' into the integer 20110911,
    '2018-12-01' into 20181201, and so on.
    :param date: date string (an int is returned unchanged)
    :return: int
"""
# return int(str(date)[0:4] + str(date)[5:7] + str(date)[8:10])
if isinstance(date, str):
return int(str().join(date.split('-')))
elif isinstance(date, int):
return date
def QA_util_date_int2str(int_date):
"""
    Convert an 8-digit integer such as 20110911 into the date string '2011-09-11'.
    :param int_date: 8-digit int (a 10-character date string is returned unchanged)
    :return: str
"""
date = str(int_date)
if len(date) == 8:
return str(date[0:4] + '-' + date[4:6] + '-' + date[6:8])
elif len(date) == 10:
return date
def QA_util_to_datetime(time):
"""
    Convert the string '2018-01-01' (or '2018-01-01 00:00:00') into a datetime object.
    :param time: str -- either '2018-01-01' (length 10) or '2018-01-01 00:00:00' (length 19)
    :return: datetime.datetime
"""
if len(str(time)) == 10:
_time = '{} 00:00:00'.format(time)
elif len(str(time)) == 19:
_time = str(time)
    else:
        QA_util_log_info('WRONG DATETIME FORMAT {}'.format(time))
        return None  # _time would be undefined here; return None instead of raising NameError
    return datetime.datetime.strptime(_time, '%Y-%m-%d %H:%M:%S')
def QA_util_datetime_to_strdate(dt):
"""
:param dt: pythone datetime.datetime
:return: 1999-02-01 string type
"""
strdate = "%04d-%02d-%02d" % (dt.year, dt.month, dt.day)
return strdate
def QA_util_datetime_to_strdatetime(dt):
"""
:param dt: pythone datetime.datetime
:return: 1999-02-01 09:30:91 string type
"""
strdatetime = "%04d-%02d-%02d %02d:%02d:%02d" % (
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second
)
return strdatetime
def QA_util_date_stamp(date):
"""
    Convert the string '2018-01-01' into a float timestamp, the same kind of value time.time() returns.
    :param date: str -- must look like 2018-01-01 (length 10)
    :return: float
"""
datestr = str(date)[0:10]
date = time.mktime(time.strptime(datestr, '%Y-%m-%d'))
return date
def QA_util_time_stamp(time_):
"""
    Convert the string '2018-01-01 00:00:00' into a float timestamp, the same kind of value time.time() returns.
    :param time_: str -- preferably formatted as %Y-%m-%d %H:%M:%S with a space in the middle
    :return: float
"""
if len(str(time_)) == 10:
        # yyyy-mm-dd format
return time.mktime(time.strptime(time_, '%Y-%m-%d'))
elif len(str(time_)) == 16:
        # yyyy-mm-dd hh:mm format
return time.mktime(time.strptime(time_, '%Y-%m-%d %H:%M'))
else:
timestr = str(time_)[0:19]
return time.mktime(time.strptime(timestr, '%Y-%m-%d %H:%M:%S'))
def QA_util_pands_timestamp_to_date(pandsTimestamp):
"""
    Convert a pandas Timestamp into a datetime.date.
    :param pandsTimestamp: pandas._libs.tslib.Timestamp
    :return: datetime.date
"""
return pandsTimestamp.to_pydatetime().date()
def QA_util_pands_timestamp_to_datetime(pandsTimestamp):
"""
    Convert a pandas Timestamp into a datetime.datetime.
    :param pandsTimestamp: pandas._libs.tslib.Timestamp
    :return: datetime.datetime
"""
return pandsTimestamp.to_pydatetime()
def QA_util_stamp2datetime(timestamp):
"""
    Convert an epoch timestamp into a datetime.
    Timestamps produced by pandas are 13-digit integers (milliseconds) and must be divided by 1000;
    microsecond and nanosecond counts since 1970 are handled by the fallbacks below.
    It is common for this to be restricted to years from 1970 through 2038.
    :param timestamp: int/long epoch count in seconds, milliseconds, microseconds or nanoseconds
    :return: datetime.datetime
"""
try:
return datetime.datetime.fromtimestamp(timestamp)
except Exception as e:
        # not a plain seconds value; try milliseconds, then smaller units
try:
return datetime.datetime.fromtimestamp(timestamp / 1000)
except:
try:
return datetime.datetime.fromtimestamp(timestamp / 1000000)
except:
return datetime.datetime.fromtimestamp(timestamp / 1000000000)
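# Illustrative helper (an assumption, not part of the original module): shows that the
# fallback cascade above lets second-, millisecond- and nanosecond-resolution epoch
# values resolve to the same wall-clock datetime.
def _demo_stamp_units(ts=1514780708.0):
    a = QA_util_stamp2datetime(ts)                 # seconds
    b = QA_util_stamp2datetime(ts * 1000)          # milliseconds
    c = QA_util_stamp2datetime(ts * 1000000000)    # nanoseconds
    assert a == b == c
    return a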
#
def QA_util_ms_stamp(ms):
"""
    Return the value unchanged.
    :param ms: long -- tick count
    :return: ms
"""
return ms
def QA_util_date_valid(date):
"""
    Check whether a string is formatted like 1982-05-11.
    :param date: str -- expected to be 10 characters long
    :return: bool -- whether the format is valid
"""
try:
time.strptime(date, "%Y-%m-%d")
return True
except:
return False
def QA_util_realtime(strtime, client):
"""
    Look up the nearest trade date on or after the given date in the database.
    :param strtime: str -- e.g. 1999-12-11
    :param client: pymongo.MongoClient -- MongoDB connection obtained from QA_util_sql_mongo_setting
    :return: dict -- {'time_real': trade date, 'id': index}
"""
time_stamp = QA_util_date_stamp(strtime)
coll = client.quantaxis.trade_date
temp_str = coll.find_one({'date_stamp': {"$gte": time_stamp}})
time_real = temp_str['date']
time_id = temp_str['num']
return {'time_real': time_real, 'id': time_id}
def QA_util_id2date(idx, client):
"""
    Look up the TDX (tongdaxin) trade date for a given index in the database.
    :param idx: database index
    :param client: pymongo.MongoClient -- MongoDB connection obtained from QA_util_sql_mongo_setting
    :return: str -- trade date stored in the TDX database
"""
coll = client.quantaxis.trade_date
temp_str = coll.find_one({'num': idx})
return temp_str['date']
def QA_util_is_trade(date, code, client):
"""
    Check whether the given date is a trading day for the given stock
    by querying the database.
    :param date: str -- 10-character string such as 1999-12-11
    :param code: str -- 6-character stock code such as 603658
    :param client: pymongo.MongoClient -- MongoDB connection obtained from QA_util_sql_mongo_setting
    :return: bool -- whether it is a trading day
"""
coll = client.quantaxis.stock_day
date = str(date)[0:10]
is_trade = coll.find_one({'code': code, 'date': date})
try:
len(is_trade)
return True
except:
return False
def QA_util_get_date_index(date, trade_list):
"""
    Return the position of the date within trade_list.
    :param date: str -- 10-character string such as 1999-12-11
    :param trade_list: list of trade-date strings
    :return: int -- index of the date in trade_list
"""
return trade_list.index(date)
def QA_util_get_index_date(id, trade_list):
"""
    Return the trade date at the given index of trade_list.
    :param id: int -- index into trade_list
    :param trade_list: list of trade-date strings
    :return: str -- the trade date at that index
"""
return trade_list[id]
def QA_util_select_hours(time=None, gt=None, lt=None, gte=None, lte=None):
    'QUANTAXIS hour-range helper: True only when the hour of the given time satisfies every gt/lt/gte/lte bound supplied, e.g. from 9 a.m. to 11 a.m.'
if time is None:
__realtime = datetime.datetime.now()
else:
__realtime = time
fun_list = []
if gt != None:
fun_list.append('>')
if lt != None:
fun_list.append('<')
if gte != None:
fun_list.append('>=')
if lte != None:
fun_list.append('<=')
assert len(fun_list) > 0
true_list = []
try:
for item in fun_list:
if item == '>':
if __realtime.strftime('%H') > gt:
true_list.append(0)
else:
true_list.append(1)
elif item == '<':
if __realtime.strftime('%H') < lt:
true_list.append(0)
else:
true_list.append(1)
elif item == '>=':
if __realtime.strftime('%H') >= gte:
true_list.append(0)
else:
true_list.append(1)
elif item == '<=':
if __realtime.strftime('%H') <= lte:
true_list.append(0)
else:
true_list.append(1)
except:
return Exception
if sum(true_list) > 0:
return False
else:
return True
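# Illustrative helper (an assumption, not part of the original module): the function
# above returns True only when the hour of the given time satisfies every bound that
# was supplied; note the bounds are compared as strings against strftime('%H').
def _demo_select_hours():
    nine_thirty = datetime.datetime(2018, 1, 1, 9, 30)
    assert QA_util_select_hours(time=nine_thirty, gte='09', lte='11')
    assert not QA_util_select_hours(time=nine_thirty, gt='10')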
def QA_util_select_min(time=None, gt=None, lt=None, gte=None, lte=None):
"""
    QUANTAXIS minute-range helper: True only when the minute of the given time satisfies every gt/lt/gte/lte bound supplied, e.g. from minute 30 to minute 59.
:param time:
:param gt:
:param lt:
:param gte:
:param lte:
:return:
"""
if time is None:
__realtime = datetime.datetime.now()
else:
__realtime = time
fun_list = []
if gt != None:
fun_list.append('>')
if lt != None:
fun_list.append('<')
if gte != None:
fun_list.append('>=')
if lte != None:
fun_list.append('<=')
assert len(fun_list) > 0
true_list = []
try:
for item in fun_list:
if item == '>':
if __realtime.strftime('%M') > gt:
true_list.append(0)
else:
true_list.append(1)
elif item == '<':
if __realtime.strftime('%M') < lt:
true_list.append(0)
else:
true_list.append(1)
elif item == '>=':
if __realtime.strftime('%M') >= gte:
true_list.append(0)
else:
true_list.append(1)
elif item == '<=':
if __realtime.strftime('%M') <= lte:
true_list.append(0)
else:
true_list.append(1)
except:
return Exception
if sum(true_list) > 0:
return False
else:
return True
def QA_util_time_delay(time_=0):
"""
    A reusable delay helper, intended for use e.g. as a decorator.
    It uses threading.Timer so the delay does not block the process: if two
    functions are submitted at once and only the first one needs a delay,
    using time.sleep would also block the second one.
:param time_:
:return:
"""
def _exec(func):
        threading.Timer(time_, func).start()  # without start() the delayed call never fires
return _exec
def QA_util_calc_time(func, *args, **kwargs):
"""
    Timing helper: run func(*args, **kwargs) and print the elapsed time.
:param func:
:param args:
:param kwargs:
:return:
"""
_time = datetime.datetime.now()
func(*args, **kwargs)
print(datetime.datetime.now() - _time)
# return datetime.datetime.now() - _time
month_data = pd.date_range(
'1/1/1996',
'12/31/2023',
freq='Q-MAR'
).astype(str).tolist()
if __name__ == '__main__':
print(QA_util_time_stamp('2017-01-01 10:25:08'))
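    # Additional illustrative calls (assumptions, not part of the original script):
    # the helpers above round-trip between string, integer and epoch representations.
    print(QA_util_date_str2int('2018-01-01'))      # 20180101
    print(QA_util_date_int2str(20180101))          # 2018-01-01
    _ts = QA_util_time_stamp('2018-01-01 10:25:08')
    print(QA_util_stamp2datetime(_ts))             # 2018-01-01 10:25:08 (local time)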
|
py | 1a52110ef36bbc0888e03cc25b3717822cb75c16 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AST node annotation support.
Adapted from Tangent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
# pylint:disable=g-bad-import-order
import gast
# pylint:enable=g-bad-import-order
# TODO(mdan): Shorten the names.
# These names are heavily used, and spelling out long qualified names everywhere gets verbose.
# TODO(mdan): Replace the attr-dict mechanism with a more typed solution.
class NoValue(enum.Enum):
def __repr__(self):
return self.name
class Basic(NoValue):
"""Container for basic annotation keys.
The enum values are used strictly for documentation purposes.
"""
QN = 'Qualified name, as it appeared in the code. See qual_names.py.'
SKIP_PROCESSING = (
'This node should be preserved as is and not processed any further.')
INDENT_BLOCK_REMAINDER = (
'When a node is annotated with this, the remainder of the block should'
' be indented below it. The annotation contains a tuple'
' (new_body, name_map), where `new_body` is the new indented block and'
' `name_map` allows renaming symbols.')
ORIGIN = ('Information about the source code that converted code originated'
' from. See origin_information.py.')
class Static(NoValue):
"""Container for static analysis annotation keys.
The enum values are used strictly for documentation purposes.
"""
# Deprecated - use reaching definitions instead.
# Symbols
# These flags are boolean.
IS_LOCAL = 'Symbol is local to the function scope being analyzed.'
IS_PARAM = 'Symbol is a parameter to the function being analyzed.'
# Scopes
# Scopes are represented by objects of type activity.Scope.
SCOPE = 'The scope for the annotated node. See activity.py.'
# TODO(mdan): Drop these in favor of accessing the child's SCOPE.
ARGS_SCOPE = 'The scope for the argument list of a function call.'
COND_SCOPE = 'The scope for the test node of a conditional statement.'
BODY_SCOPE = (
'The scope for the main body of a statement (True branch for if '
'statements, main body for loops).')
ORELSE_SCOPE = (
'The scope for the orelse body of a statement (False branch for if '
'statements, orelse body for loops).')
# Static analysis annotations.
DEFINITIONS = (
'Reaching definition information. See reaching_definitions.py.')
ORIG_DEFINITIONS = (
'The value of DEFINITIONS that applied to the original code before any'
' conversion.')
DEFINED_VARS_IN = (
'Symbols defined when entering the node. See reaching_definitions.py.')
LIVE_VARS_OUT = ('Symbols live when exiting the node. See liveness.py.')
FAIL = object()
def keys(node, field_name='___pyct_anno'):
if not hasattr(node, field_name):
return frozenset()
return frozenset(getattr(node, field_name).keys())
def getanno(node, key, default=FAIL, field_name='___pyct_anno'):
if (default is FAIL or (hasattr(node, field_name) and
(key in getattr(node, field_name)))):
return getattr(node, field_name)[key]
else:
return default
def hasanno(node, key, field_name='___pyct_anno'):
return hasattr(node, field_name) and key in getattr(node, field_name)
def setanno(node, key, value, field_name='___pyct_anno'):
annotations = getattr(node, field_name, {})
setattr(node, field_name, annotations)
annotations[key] = value
# So that the annotations survive gast_to_ast() and ast_to_gast()
if field_name not in node._fields:
node._fields += (field_name,)
def delanno(node, key, field_name='___pyct_anno'):
annotations = getattr(node, field_name)
del annotations[key]
if not annotations:
delattr(node, field_name)
node._fields = tuple(f for f in node._fields if f != field_name)
def copyanno(from_node, to_node, key, field_name='___pyct_anno'):
if hasanno(from_node, key, field_name=field_name):
setanno(
to_node,
key,
getanno(from_node, key, field_name=field_name),
field_name=field_name)
def dup(node, copy_map, field_name='___pyct_anno'):
"""Recursively copies annotations in an AST tree.
Args:
node: ast.AST
copy_map: Dict[Hashable, Hashable], maps a source anno key to a destination
key. All annotations with the source key will be copied to identical
annotations with the destination key.
field_name: str
"""
for n in gast.walk(node):
for k in copy_map:
if hasanno(n, k, field_name):
setanno(n, copy_map[k], getanno(n, k, field_name), field_name)
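# Usage sketch (illustrative, not part of the original module): annotations are plain
# key/value pairs attached to AST nodes through a synthetic field; gast nodes behave
# the same way as the standard ast nodes used here.
def _demo_anno():
  import ast
  node = ast.parse('x = 1').body[0]
  setanno(node, Basic.QN, 'x')
  assert hasanno(node, Basic.QN)
  assert getanno(node, Basic.QN) == 'x'
  delanno(node, Basic.QN)
  assert getanno(node, Basic.QN, default=None) is None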
|
py | 1a52123c8917d643678b365b3776d5c7f33f42db | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The QQcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- createrawtransaction
- signrawtransaction
- sendrawtransaction
- decoderawtransaction
- getrawtransaction
"""
from test_framework.test_framework import QQcoinTestFramework
from test_framework.util import *
class multidict(dict):
"""Dictionary that allows duplicate keys.
Constructed with a list of (key, value) tuples. When dumped by the json module,
will output invalid json with repeated keys, eg:
    >>> json.dumps(multidict([(1,2),(1,2)]))
'{"1": 2, "1": 2}'
Used to test calls to rpc methods with repeated keys in the json object."""
def __init__(self, x):
dict.__init__(self, x)
self.x = x
def items(self):
return self.x
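# Illustrative check (an assumption, not part of the original test): the duplicate keys
# survive json serialization, which is what the repeated-key RPC tests below rely on.
def _multidict_example():
    import json
    return json.dumps(multidict([("a", 1), ("a", 2)]))  # '{"a": 1, "a": 2}'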
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(QQcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"], ["-addresstype=legacy"]]
def setup_network(self, split=False):
super().setup_network()
connect_nodes_bi(self.nodes,0,2)
def run_test(self):
#prepare some coins for multiple *rawtransaction commands
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
# Test `createrawtransaction` required parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
# Test `createrawtransaction` invalid extra parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
# Test `createrawtransaction` invalid `inputs`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{}], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
# Test `createrawtransaction` invalid `outputs`
address = self.nodes[0].getnewaddress()
assert_raises_rpc_error(-3, "Expected type object", self.nodes[0].createrawtransaction, [], 'foo')
assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
assert_raises_rpc_error(-5, "Invalid QQcoin address", self.nodes[0].createrawtransaction, [], {'foo': 0})
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
# Test `createrawtransaction` invalid `locktime`
assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
# Test `createrawtransaction` invalid `replaceable`
assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
#########################################
# sendrawtransaction with missing input #
#########################################
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransaction(rawtx)
# This will raise an exception since there are missing inputs
assert_raises_rpc_error(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])
#####################################
# getrawtransaction with block hash #
#####################################
# make a tx by sending then generate 2 blocks; block1 has the tx in it
tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
block1, block2 = self.nodes[2].generate(2)
self.sync_all()
# We should be able to get the raw transaction by providing the correct block
gottx = self.nodes[0].getrawtransaction(tx, True, block1)
assert_equal(gottx['txid'], tx)
assert_equal(gottx['in_active_chain'], True)
# We should not have the 'in_active_chain' flag when we don't provide a block
gottx = self.nodes[0].getrawtransaction(tx, True)
assert_equal(gottx['txid'], tx)
assert 'in_active_chain' not in gottx
# We should not get the tx if we provide an unrelated block
assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
# An invalid block hash should raise the correct errors
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, True)
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, "foobar")
assert_raises_rpc_error(-8, "parameter 3 must be of length 64", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
# Undo the blocks and check in_active_chain
self.nodes[0].invalidateblock(block1)
gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
assert_equal(gottx['in_active_chain'], False)
self.nodes[0].reconsiderblock(block1)
assert_equal(self.nodes[0].getbestblockhash(), block2)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 BTC to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
addr3Obj = self.nodes[2].validateaddress(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
        # THIS IS AN INCOMPLETE FEATURE
#NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransaction(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# 2of2 test for combining transactions
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
mSigObjValid = self.nodes[2].validateaddress(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx2['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned1 = self.nodes[1].signrawtransaction(rawTx2, inputs)
self.log.info(rawTxPartialSigned1)
        assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxPartialSigned2 = self.nodes[2].signrawtransaction(rawTx2, inputs)
self.log.info(rawTxPartialSigned2)
assert_equal(rawTxPartialSigned2['complete'], False) #node2 only has one key, can't comp. sign the tx
rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
self.log.info(rawTxComb)
self.nodes[2].sendrawtransaction(rawTxComb)
rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# decoderawtransaction tests
# witness transaction
encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
# non-witness transaction
encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
# getrawtransaction tests
# 1. valid parameters - only supply txid
txHash = rawTx["hash"]
assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
# 5. valid parameters - supply txid and True for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "Flase")
# 7. invalid parameters - supply txid and empty array
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
# 9. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
# 10. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
if __name__ == '__main__':
RawTransactionsTest().main()
|
py | 1a5212987463b3fa3a01a7e60da952f96fdd9e29 |
def Show(path=None):
import sys
import os
from Qt import QtWidgets
app = QtWidgets.QApplication(sys.argv)
win = Create()
if path:
if os.path.isfile(path) and os.path.splitext(path)[-1].lower() == ".blcs":
win.openScene(path)
win.show()
win.fitInView()
app.exec_()
def Create(parent=None):
from . import main
from .. import workerManager
from .. import const
workerManager.WorkerManager.SetLogLevel(const.LogLevel.NoLog)
workerManager.WorkerManager.SetUseProcess(False)
return main.MainWindow(parent=parent)
|
py | 1a521334f9759e68ad6bef05b0d807a818835ec9 | #!/usr/bin/python
# sends a message to datagram destinations opened by samForward.py and samIn.py, using specified sending session name
# at least samForward.py should be running for results to be seen
# usage : ./samOut.py [ sendingSessionName [ message ... ] ]
# sendingSessionName : default = datagramSamForward
# message : default = "this is nice message"
import socket
import sys
import time
if len(sys.argv)>=2 :
name = sys.argv[1]
else :
name = "datagramSamForward"
if len(sys.argv)>2 :
    message = ' '.join(sys.argv[2:]).strip()
else :
message = "This is a nice message"
# client.py
port = 7655
host = "localhost"
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("", 0))
s.sendto("3.0 "+name+" tYhjbFlFL38WFuO5eCzTvE0UBr4RfaqWMKlekGeMoB-Ouz7nYaWfiS-9j3jMiZT7FH~pwdmoSREOs2ZbXK84sR59P~pPfeCMxnJrk57f3U9uKzXkesjkKWYco3YAGs-G8sw8Fu2FBx0Do57yBdA9~j8Zq6pMjmgPBXCLuXG3vo0Z8zUWCjApJyFY6OXYopHck9Fz9vKy7YhC6zXFHfEuNHVkAooduiLd~aCoGij0TW3lH2rTVU-lx-DUdi6edxQ5-RvDNkXfikvytoCpRkivbNVytjCJLk~7RNU4FpBD20wTZWNJmEG3OY3cjNjawJVFdNjtgczh9K7gZ7ad-NjVjZVhXEj1lU8mk~vAH-2QE5om8dstWUwWoNDwmVDlvIJNKzQmahG~VrpFexFHXO0n3fKIXcSgWGOHDExM8w9neCt7AxUjxPDtXXuYNW~bRwcfiL-C9~z4K9rmwiTPZX0lmsToSXTF28l7WAoj~TMT9kZAjQeFRRWU5oW5oxVuonVvAAAA\n"+message, (host, port))
s.sendto("3.0 "+name+" EYUpJFeW9tiubXR0aOjvCJ~ndj3xN0Wn-ljuGdbpOEttPg7nj0VCTOQDJ~FAolzn9FIDdmR3VjM0OFFDT46Q5HN4vShXFE2VNC8e3~GjzxJfaJhijRC2R9oIOzsNlzKtInD2o9lh0PxPioNMCigwmgWuqlQHs4tjWeaYRAtooHxbrtuoCIhIdGfyVV-nAcPiyYbouKq3leETXE~4kBXm-LfWfyPtrv6OuDk3GBVVcthv19GYBmnl2YI8HpJjc-G-TvNkgYishjzIJyEW-Xrpy43R4ZBXlyQqnheGLlbOEY8NLDbyNHLRMMOGbcr~67SVE3Iw3RqQ3Dhrkq2FCaQwcDucfIUCCbOfCZgu0hlnCkS42xsUvegQeiwMxbdI~h9v7vcR3yFFOrHX6WQvIZSbFLKNGArGJcfmOJVLqw1wTC4AgYXjk3csVDPd-QWbMXOuodyBgrg27Ds2BBYTsVXWskoo6ASsMIQZ6jMfL7PkY9dPLCRParIyzb9aPmf~MntNAAAA\n"+message, (host, port))
|
py | 1a52138ef54e4190aeb42f6bee654f01b89b9b8d | # pylint: disable=no-self-use,invalid-name
import numpy
from flaky import flaky
from deep_qa.models.reading_comprehension import BidirectionalAttentionFlow
from deep_qa.common.params import Params
from ...common.test_case import DeepQaTestCase
class TestBidirectionalAttentionFlow(DeepQaTestCase):
@flaky
def test_trains_and_loads_correctly(self):
self.write_span_prediction_files()
args = Params({
'embedding_dim': {'words': 4, 'characters': 4},
'save_models': True,
'tokenizer': {'type': 'words and characters'},
'show_summary_with_masking_info': True,
})
self.ensure_model_trains_and_loads(BidirectionalAttentionFlow, args)
def test_get_best_span(self):
# Note that the best span cannot be (1, 0) since even though 0.3 * 0.5 is the greatest
# value, the end span index is constrained to occur after the begin span index.
span_begin_probs = numpy.array([0.1, 0.3, 0.05, 0.3, 0.25])
span_end_probs = numpy.array([0.5, 0.1, 0.2, 0.05, 0.15])
begin_end_idxs = BidirectionalAttentionFlow.get_best_span(span_begin_probs,
span_end_probs)
assert begin_end_idxs == (1, 2)
# Testing an edge case of the dynamic program here, for the order of when you update the
# best previous span position. We should not get (1, 1), because that's an empty span.
span_begin_probs = numpy.array([0.4, 0.5, 0.1])
span_end_probs = numpy.array([0.3, 0.6, 0.1])
begin_end_idxs = BidirectionalAttentionFlow.get_best_span(span_begin_probs,
span_end_probs)
assert begin_end_idxs == (0, 1)
# test higher-order input
# Note that the best span cannot be (1, 1) since even though 0.3 * 0.5 is the greatest
# value, the end span index is constrained to occur after the begin span index.
span_begin_probs = numpy.array([[0.1, 0.3, 0.05, 0.3, 0.25]])
span_end_probs = numpy.array([[0.1, 0.5, 0.2, 0.05, 0.15]])
begin_end_idxs = BidirectionalAttentionFlow.get_best_span(span_begin_probs,
span_end_probs)
assert begin_end_idxs == (1, 2)
|
py | 1a52141b056e0e47d3d3f15337cf18f9facfce97 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-25 22:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('personas', '0005_auto_20160925_1210'),
]
operations = [
migrations.AlterField(
model_name='odc',
name='DescripODC',
field=models.CharField(blank=True, max_length=50, null=True),
),
]
|
py | 1a52143e216de868a987c22fece62ca45fc24837 | # dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 1024)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 1024),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
    samples_per_gpu=3,  # 4 exceeds GPU memory
workers_per_gpu=2,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/train',
ann_dir='gtFine/train',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/val',
ann_dir='gtFine/val',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/test',
# img_dir='leftImg8bit/val',
ann_dir='gtFine/test',
# ann_dir='gtFine/val',
pipeline=test_pipeline))
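# Consumption sketch (illustrative; the file path below is an assumption about where this
# config sits in a standard mmsegmentation checkout):
#   from mmcv import Config
#   cfg = Config.fromfile('configs/_base_/datasets/cityscapes.py')
#   print(cfg.data.samples_per_gpu)  # -> 3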
|
py | 1a5214829373679542441a9a217953afb47de7cc | import random
import altair
import matplotlib.pyplot as plt
import pandas as pd
from bokeh.plotting import figure as bokeh_figure
from vizno.report import Report
xs = [random.random() for _ in range(100)]
ys = [x + random.random() * 0.1 for x in xs]
f = plt.figure()
ax = f.add_subplot(111)
ax.plot(xs, ys, ".")
ax.set_xlabel("Label")
chart = (
altair.Chart(
pd.DataFrame(
{
"a": xs,
"b": ys,
}
)
)
.mark_circle(size=20)
.encode(x="a", y="b")
)
plot = bokeh_figure(plot_width=400, plot_height=300)
plot.circle(xs, ys)
r = Report.magic(title="Magic report", description="A magically gathered report")
|
py | 1a521543f084e2733fcfd297efbc0712d68aa2d0 | from sys import argv
script, filename = argv
txt = open(filename)
print("Here's your file %r: " % filename)
print(txt.read())
print("Type the filename again:")
file_again = input('> ')
txt_again = open(file_again)
print(txt_again.read())
# --Output--
# $ python exe15.py test15.txt
# Here's your file 'test15.txt':
# This is stuff I typed into a file.
# It is really cool stuff.
# Lots and lots of fun to have in here.
# Type the filename again:
# > test15.txt
# This is stuff I typed into a file.
# It is really cool stuff.
# Lots and lots of fun to have in here.
|
py | 1a52169c39499ea4ea0447e7bd76223eea45a890 | import datetime
from dateutil.parser import parse
from mongoengine import DateTimeField, FileField
from mongoengine.connection import DEFAULT_CONNECTION_NAME
from mongoengine.python_support import str_types
import StringIO
from django.conf import settings
if settings.FILE_DB == settings.S3:
import crits.core.s3_tools as S3
class CritsDateTimeField(DateTimeField):
"""
Custom MongoEngine DateTimeField. Utilizes a transform such that if the
value passed in is a string we will convert it to a datetime.datetime
object, or if it is set to None we will use the current datetime (useful
when instantiating new objects and wanting the default dates to all be the
current datetime).
"""
def __set__(self, instance, value):
value = self.transform(value)
return super(CritsDateTimeField, self).__set__(instance, value)
def transform(self, value):
if value and isinstance(value, basestring):
return parse(value, fuzzy=True)
elif not value:
return datetime.datetime.now()
else:
return value
class S3Proxy(object):
"""
Custom proxy for MongoEngine which uses S3 to store binaries instead of
GridFS.
"""
def __init__(self, grid_id=None, key=None, instance=None,
db_alias=DEFAULT_CONNECTION_NAME, collection_name='fs'):
self.grid_id = grid_id # Store id for file
self.key = key
self.instance = instance
self.db_alias = db_alias
self.collection_name = collection_name
self.newfile = None # Used for partial writes
self.gridout = None
def __getattr__(self, name):
attrs = ('_fs', 'grid_id', 'key', 'instance', 'db_alias',
'collection_name', 'newfile', 'gridout')
if name in attrs:
return self.__getattribute__(name)
obj = self.get()
if name in dir(obj):
return getattr(obj, name)
raise AttributeError
def __get__(self, instance, value):
return self
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.grid_id)
def delete(self):
# Delete file from S3, FileField still remains
S3.delete_file_s3(self.grid_id,self.collection_name)
self.grid_id = None
self.gridout = None
self._mark_as_changed()
def get(self, id=None):
if id:
self.grid_id = id
if self.grid_id is None:
return None
try:
if self.gridout is None:
self.gridout = StringIO.StringIO(S3.get_file_s3(self.grid_id, self.collection_name))
return self.gridout
except:
return None
def put(self, file_obj, **kwargs):
if self.grid_id:
raise Exception('This document already has a file. Either delete '
'it or call replace to overwrite it')
self.grid_id = S3.put_file_s3(file_obj, self.collection_name)
self._mark_as_changed()
def read(self, size=-1):
gridout = self.get()
if gridout is None:
return None
else:
try:
return gridout.read(size)
except:
return ""
def _mark_as_changed(self):
"""Inform the instance that `self.key` has been changed"""
if self.instance:
self.instance._mark_as_changed(self.key)
class S3FileField(FileField):
"""
Custom FileField for MongoEngine which utilizes S3.
"""
def __init__(self, db_alias=DEFAULT_CONNECTION_NAME, collection_name="fs",
**kwargs):
super(S3FileField, self).__init__(db_alias, collection_name, **kwargs)
self.proxy_class = S3Proxy
def __set__(self, instance, value):
key = self.name
if ((hasattr(value, 'read') and not
isinstance(value, self.proxy_class)) or isinstance(value, str_types)):
# using "FileField() = file/string" notation
grid_file = instance._data.get(self.name)
# If a file already exists, delete it
if grid_file:
try:
grid_file.delete()
except:
pass
# Create a new file with the new data
grid_file.put(value)
else:
# Create a new proxy object as we don't already have one
instance._data[key] = self.proxy_class(key=key, instance=instance,
collection_name=self.collection_name)
instance._data[key].put(value)
else:
instance._data[key] = value
instance._mark_as_changed(key)
def getFileField(db_alias=DEFAULT_CONNECTION_NAME, collection_name="fs", **kwargs):
"""
Determine if the admin has configured CRITs to utilize GridFS or S3 for
binary storage.
"""
if settings.FILE_DB == settings.GRIDFS:
return FileField(db_alias, collection_name, **kwargs)
elif settings.FILE_DB == settings.S3:
return S3FileField(db_alias, collection_name, **kwargs)
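# Usage sketch (illustrative; the abstract document class below is an assumption, not
# part of CRITs): getFileField lets the declaring class stay agnostic about GridFS vs. S3.
from mongoengine import Document
class _ExampleBinary(Document):
    meta = {'abstract': True}
    created = CritsDateTimeField(default=datetime.datetime.now)
    filedata = getFileField(collection_name='objects')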
|
py | 1a5217ac567bd3b4174cf390407c8b2b9b2877f6 | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.http import Http404
# Create your views here.
from django.views.generic import View
from django.contrib.messages.views import SuccessMessageMixin
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.views.generic.base import TemplateView, \
TemplateResponseMixin, \
ContextMixin
from django.utils.decorators import method_decorator
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView,\
UpdateView, \
DeleteView, \
ModelFormMixin
from django.utils import timezone
from .models import Book
from .forms import BookForm
class LoginRequiredMixin(object):
# @classmethod
# def as_view(cls, **kwargs):
# view = super(LoginRequiredMixin, cls).as_view(**kwargs)
# return login_required(view)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
class MultipleObjectMixin(object):
def get_object(self, queryset=None, *args, **kwargs):
slug = self.kwargs.get("slug")
print("SLUG:", slug)
if slug:
try:
obj = self.model.objects.get(slug=slug)
except self.model.MultipleObjectsReturned:
obj = self.get_queryset().first()
except:
raise Http404
return obj
raise Http404
class BookDeleteView(MultipleObjectMixin, DeleteView):
model = Book
success_url = reverse_lazy('book_list')
class BookUpdateView(MultipleObjectMixin, UpdateView):
model = Book
form_class = BookForm
template_name = "forms.html"
# def TestView(request):
# form = TestForm(request.POST or None)
# if form.is_valid():
# print(form.cleaned_data)
# print(form.cleaned_data.get("some_text"))
# print(form.cleaned_data.get("email"))
# print(form.cleaned_data.get("email2"))
#
# return render(request, "forms.html", {"form": form})
class BookCreateView(SuccessMessageMixin, MultipleObjectMixin, CreateView):
form_class = BookForm
template_name = "forms.html"
success_message = "%(title)s has been created at %(created_at)s"
#to add new fields to the model
def form_valid(self, form):
form.instance.added_by = self.request.user
return super(BookCreateView, self).form_valid(form)
def get_success_url(self):
return reverse("book_list")
def get_success_message(self, cleaned_data):
return self.success_message % dict(
cleaned_data,
created_at=self.object.timestamp,
)
class AboutView(TemplateView):
template_name = "about.html"
def get_context_data(self, **kwargs):
context = super(AboutView, self).get_context_data(**kwargs)
context['title'] = 'Qaisar Khan'
return context
class MyView(LoginRequiredMixin, ContextMixin, TemplateResponseMixin, View):
template_name = "about.html"
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
context['title'] = 'Some other Title'
return self.render_to_response(context)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(MyView, self).dispatch(request, *args, **kwargs)
class BookListView(ListView):
model = Book
def get_queryset(self, *args, **kwargs):
#qs = super(BookListView, self).get_queryset(*args, **kwargs).filter(title__startswith="Ye")
qs = super(BookListView, self).get_queryset(*args, **kwargs).order_by("-timestamp")
        print(qs)
return qs
def get_context_data(self, **kwargs):
context = super(BookListView, self).get_context_data(**kwargs)
context['now'] = timezone.now()
return context
class BookDetailView(SuccessMessageMixin, ModelFormMixin, MultipleObjectMixin, DetailView):
model = Book
form_class = BookForm
success_message = "%(title)s has been updated."
#template_name = "forms.html"
def get_context_data(self, *args, **kwargs):
context = super(BookDetailView, self).get_context_data(*args, **kwargs)
context["form"] = self.get_form()
context['now'] = timezone.now()
context['btn_title'] = "Update Book Detail"
return context
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def get_success_url(self):
return reverse("book_list")
|
py | 1a521824f6a164cdf1fda4fecf6f4a79b6d3f1d5 | """Contains domain batch classes.
"""
__author__ = 'Paul Landes'
from typing import Tuple, Type, Any
from dataclasses import dataclass, field
import copy as cp
from zensols.config import Settings
from zensols.persist import persisted
from zensols.deeplearn.batch import (
DataPoint,
Batch,
BatchStash,
ManagerFeatureMapping,
FieldFeatureMapping,
BatchFeatureMapping,
)
from zensols.nlp import (
FeatureSentence, FeatureDocument, TokenAnnotatedFeatureSentence
)
from zensols.deeplearn.result import ResultsContainer
from zensols.deeplearn.vectorize import (
FeatureVectorizerManager, FeatureVectorizer
)
from zensols.deepnlp.batch import FeatureSentenceDataPoint
from zensols.deepnlp.classify import ClassificationPredictionMapper
@dataclass
class NERPredictionMapper(ClassificationPredictionMapper):
def _create_data_point(self, cls: Type[DataPoint],
feature: Any) -> DataPoint:
return cls(None, self.batch_stash, feature, True)
def _create_features(self, sent_text: str) -> Tuple[FeatureSentence]:
doc: FeatureDocument = self.vec_manager.parse(sent_text)
self._docs.append(doc)
return doc.sents
def map_results(self, result: ResultsContainer) -> Settings:
classes = self._map_classes(result)
return Settings(classes=tuple(classes), docs=tuple(self._docs))
@dataclass
class NERDataPoint(FeatureSentenceDataPoint):
is_pred: bool = field(default=False)
def __post_init__(self):
self.sent = TokenAnnotatedFeatureSentence(
self.sent.sent_tokens, self.sent.text, self.ents)
if self.is_pred:
self._map_syn(self.sent)
self._map_tag(self.sent)
def _map_syn(self, sent: FeatureSentence):
"""Map from spaCy POS tags to the corpus *syntactic chunk*."""
last = None
outs = set('CC .'.split())
for t in sent:
syn = 'NP'
tag = t.tag_
if tag.startswith('V') or tag == 'TO':
syn = 'VP'
elif tag == 'IN':
syn = 'PP'
elif tag in outs:
syn = 'O'
elif tag == 'ROOT':
last = None
if syn == 'O':
stag = syn
else:
stag = 'I' if last == syn else 'B'
stag = f'{stag}-{syn}'
last = syn
t.syn_ = stag
def _map_tag(self, sent: FeatureSentence):
stash: BatchStash = self.batch_stash
mng: FeatureVectorizerManager = \
stash.vectorizer_manager_set['language_feature_manager']
vec: FeatureVectorizer = mng['tag']
labs = set(vec.label_encoder.classes_)
for t in sent:
if t.tag_ not in labs:
t.tag_ = ','
@property
@persisted('_ents', transient=True)
def ents(self) -> Tuple[str]:
"""The label: the fourth the named entity tag."""
if self.is_pred:
return tuple([None] * len(self.sent))
else:
return tuple(map(lambda t: t.ent_, self.sent.token_iter()))
@property
def trans_doc(self) -> FeatureDocument:
"""The document used by the transformer vectorizers. Return ``None`` for
prediction data points to avoid vectorization.
"""
if self.is_pred:
return None
return self.doc
@dataclass
class NERBatch(Batch):
LANGUAGE_FEATURE_MANAGER_NAME = 'language_feature_manager'
GLOVE_50_EMBEDDING = 'glove_50_embedding'
GLOVE_300_EMBEDDING = 'glove_300_embedding'
WORD2VEC_300_EMBEDDING = 'word2vec_300_embedding'
TRANSFORMER_FIXED_EMBEDDING = 'transformer_fixed_embedding'
TRANSFORMER_TRAINABLE_EMBEDDING = 'transformer_trainable_embedding'
TRANSFORMER_TRAINABLE_MODEL_NAME = 'transformer_trainable'
EMBEDDING_ATTRIBUTES = {GLOVE_50_EMBEDDING, GLOVE_300_EMBEDDING,
WORD2VEC_300_EMBEDDING, TRANSFORMER_FIXED_EMBEDDING,
TRANSFORMER_TRAINABLE_EMBEDDING}
MAPPINGS = BatchFeatureMapping(
'ents',
[ManagerFeatureMapping(
'label_vectorizer_manager',
(FieldFeatureMapping('ents', 'entlabel', True, is_label=True),
FieldFeatureMapping('mask', 'mask', True, 'ents'),
)),
ManagerFeatureMapping(
LANGUAGE_FEATURE_MANAGER_NAME,
(FieldFeatureMapping('tags', 'tag', True, 'doc'),
FieldFeatureMapping('syns', 'syn', True, 'doc'),
FieldFeatureMapping(GLOVE_50_EMBEDDING, 'wvglove50', True, 'doc'),
FieldFeatureMapping(GLOVE_300_EMBEDDING, 'wvglove300', True, 'doc'),
FieldFeatureMapping(WORD2VEC_300_EMBEDDING, 'w2v300', True, 'doc'),
FieldFeatureMapping(TRANSFORMER_TRAINABLE_EMBEDDING, TRANSFORMER_TRAINABLE_MODEL_NAME, True, 'doc'),
FieldFeatureMapping('tags_expander', 'transformer_tags_expander', True, 'doc'),
FieldFeatureMapping('syns_expander', 'transformer_syns_expander', True, 'doc'),
FieldFeatureMapping('ents_trans', 'entlabel_trans', True, 'trans_doc', is_label=True),
),)])
TRANS_MAPPINGS = cp.deepcopy(MAPPINGS)
TRANS_MAPPINGS.label_attribute_name = 'ents_trans'
def _get_batch_feature_mappings(self) -> BatchFeatureMapping:
stash: BatchStash = self.batch_stash
if 'ents_trans' in stash.decoded_attributes:
maps = self.TRANS_MAPPINGS
else:
maps = self.MAPPINGS
return maps
|
py | 1a521881c6c5b56870306a0972fee04dde297a2b | # Copyright The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Generates an Android.bp file from the json output of a 'gn desc' command.
# Example usage:
# gn desc out/Android --format=json "*" > desc.json
# python scripts/generate_android_bp.py desc.json > Android.bp
import json
import sys
import re
import os
import argparse
from datetime import date
root_targets = [
"//:libGLESv2",
"//:libGLESv1_CM",
"//:libEGL",
"//:libfeature_support",
]
sdk_version = '28'
stl = 'libc++_static'
def tabs(indent):
return ' ' * (indent * 4)
def has_child_values(value):
# Elements of the blueprint can be pruned if they are empty lists or dictionaries of empty
# lists
if isinstance(value, list):
return len(value) > 0
if isinstance(value, dict):
for (item, item_value) in value.items():
if has_child_values(item_value):
return True
return False
# This is a value leaf node
return True
def write_blueprint_key_value(output, name, value, indent=1):
if not has_child_values(value):
return
if isinstance(value, set) or isinstance(value, list):
value = list(sorted(set(value)))
if isinstance(value, list):
output.append(tabs(indent) + '%s: [' % name)
for item in value:
output.append(tabs(indent + 1) + '"%s",' % item)
output.append(tabs(indent) + '],')
return
if isinstance(value, dict):
if not value:
return
output.append(tabs(indent) + '%s: {' % name)
for (item, item_value) in value.items():
write_blueprint_key_value(output, item, item_value, indent + 1)
output.append(tabs(indent) + '},')
return
if isinstance(value, bool):
output.append(tabs(indent) + '%s: %s,' % (name, 'true' if value else 'false'))
return
output.append(tabs(indent) + '%s: "%s",' % (name, value))
def write_blueprint(output, target_type, values):
output.append('%s {' % target_type)
for (key, value) in values.items():
write_blueprint_key_value(output, key, value)
output.append('}')
def gn_target_to_blueprint_target(target, target_info):
if 'output_name' in target_info:
return target_info['output_name']
# Split the gn target name (in the form of //gn_file_path:target_name) into gn_file_path and
# target_name
target_regex = re.compile(r"^//([a-zA-Z0-9\-_/]*):([a-zA-Z0-9\-_\.]+)$")
match = re.match(target_regex, target)
assert match != None
gn_file_path = match.group(1)
target_name = match.group(2)
assert len(target_name) > 0
# Clean up the gn file path to be a valid blueprint target name.
gn_file_path = gn_file_path.replace("/", "_").replace(".", "_").replace("-", "_")
# Generate a blueprint target name by merging the gn path and target so each target is unique.
    # Prepend the 'angle' prefix to all targets in the root path (empty gn_file_path).
    # Skip this step if the target name already starts with 'angle' to avoid target
    # names such as 'angle_angle_common'.
root_prefix = "angle"
if len(gn_file_path) == 0 and not target_name.startswith(root_prefix):
gn_file_path = root_prefix
# Avoid names such as _angle_common if the gn_file_path is empty.
if len(gn_file_path) > 0:
gn_file_path += "_"
return gn_file_path + target_name
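# Illustrative helper (an assumption, not part of the original script): shows how the
# mapping above rewrites gn labels into blueprint target names.
def _demo_target_names():
    assert gn_target_to_blueprint_target('//:libGLESv2', {}) == 'angle_libGLESv2'
    assert gn_target_to_blueprint_target('//src/common:angle_common', {}) == 'src_common_angle_common'
    assert gn_target_to_blueprint_target('//:foo', {'output_name': 'libfoo'}) == 'libfoo'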
def remap_gn_path(path):
# TODO: pass the gn gen folder as an arg so it is future proof. b/150457277
remap_folders = [
('out/Android/gen/angle/', ''),
('out/Android/gen/', ''),
]
remapped_path = path
for (remap_source, remap_dest) in remap_folders:
remapped_path = remapped_path.replace(remap_source, remap_dest)
return remapped_path
def gn_path_to_blueprint_path(source):
# gn uses '//' to indicate the root directory, blueprint uses the .bp file's location
return remap_gn_path(re.sub(r'^//?', '', source))
def gn_paths_to_blueprint_paths(paths):
rebased_paths = []
for path in paths:
rebased_paths.append(gn_path_to_blueprint_path(path))
return rebased_paths
def gn_sources_to_blueprint_sources(sources):
# Blueprints only list source files in the sources list. Headers are only referenced though
# include paths.
file_extension_whitelist = [
'.c',
'.cc',
'.cpp',
]
rebased_sources = []
for source in sources:
if os.path.splitext(source)[1] in file_extension_whitelist:
rebased_sources.append(gn_path_to_blueprint_path(source))
return rebased_sources
target_blackist = [
'//build/config:shared_library_deps',
]
include_blacklist = [
]
def gn_deps_to_blueprint_deps(target_info, build_info):
static_libs = []
shared_libs = []
defaults = []
generated_headers = []
header_libs = []
if not 'deps' in target_info:
return (static_libs, defaults)
for dep in target_info['deps']:
if not dep in target_blackist:
dep_info = build_info[dep]
blueprint_dep_name = gn_target_to_blueprint_target(dep, dep_info)
# Depending on the dep type, blueprints reference it differently.
gn_dep_type = dep_info['type']
if gn_dep_type == 'static_library':
static_libs.append(blueprint_dep_name)
elif gn_dep_type == 'shared_library':
shared_libs.append(blueprint_dep_name)
elif gn_dep_type == 'source_set' or gn_dep_type == 'group':
defaults.append(blueprint_dep_name)
elif gn_dep_type == 'action':
generated_headers.append(blueprint_dep_name)
# Blueprints do not chain linking of static libraries.
(child_static_libs, _, _, child_generated_headers, _) = gn_deps_to_blueprint_deps(
dep_info, build_info)
# Each target needs to link all child static library dependencies.
static_libs += child_static_libs
# Each blueprint target runs genrules in a different output directory unlike GN. If a
# target depends on another's genrule, it wont find the outputs. Propogate generated
# headers up the dependency stack.
generated_headers += child_generated_headers
return (static_libs, shared_libs, defaults, generated_headers, header_libs)
def gn_libs_to_blueprint_shared_libraries(target_info):
lib_blackist = [
'android_support',
]
result = []
if 'libs' in target_info:
for lib in target_info['libs']:
if not lib in lib_blackist:
android_lib = lib if '@' in lib else 'lib' + lib
result.append(android_lib)
return result
def gn_include_dirs_to_blueprint_include_dirs(target_info):
result = []
if 'include_dirs' in target_info:
for include_dir in target_info['include_dirs']:
if not include_dir in include_blacklist:
result.append(gn_path_to_blueprint_path(include_dir))
return result
def escape_quotes(s):
    return s.replace("\"", "\\\"").replace("\'", "\\\'")
angle_cpu_bits_define = r'^ANGLE_IS_[0-9]+_BIT_CPU$'
def gn_cflags_to_blueprint_cflags(target_info):
result = []
# Only forward cflags that disable warnings
cflag_whitelist = r'^-Wno-.*$'
for cflag_type in ['cflags', 'cflags_c', 'cflags_cc']:
if cflag_type in target_info:
for cflag in target_info[cflag_type]:
if re.search(cflag_whitelist, cflag):
result.append(cflag)
    # Chrome and Android use different versions of Clang which support different warning options.
    # Ignore errors about unrecognized warning flags.
result.append('-Wno-unknown-warning-option')
if 'defines' in target_info:
for define in target_info['defines']:
# Don't emit ANGLE's CPU-bits define here, it will be part of the arch-specific
# information later
if not re.search(angle_cpu_bits_define, define):
result.append('-D%s' % escape_quotes(define))
return result
def gn_arch_specific_to_blueprint(target_info):
arch_infos = {
'arm': {
'bits': 32
},
'arm64': {
'bits': 64
},
'x86': {
'bits': 32
},
'x86_64': {
'bits': 64
},
}
result = {}
for (arch_name, arch_info) in arch_infos.items():
result[arch_name] = {'cflags': []}
# If the target has ANGLE's CPU-bits define, replace it with the arch-specific bits here.
if 'defines' in target_info:
for define in target_info['defines']:
if re.search(angle_cpu_bits_define, define):
for (arch_name, arch_info) in arch_infos.items():
result[arch_name]['cflags'].append('-DANGLE_IS_%d_BIT_CPU' % arch_info['bits'])
return result
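# Illustrative shape of the returned dict (assuming an ANGLE_IS_*_BIT_CPU define is present):
#   {'arm': {'cflags': ['-DANGLE_IS_32_BIT_CPU']}, 'arm64': {'cflags': ['-DANGLE_IS_64_BIT_CPU']}, ...}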
blueprint_library_target_types = {
"static_library": "cc_library_static",
"shared_library": "cc_library_shared",
"source_set": "cc_defaults",
"group": "cc_defaults",
}
def library_target_to_blueprint(target, build_info):
target_info = build_info[target]
blueprint_type = blueprint_library_target_types[target_info['type']]
bp = {}
bp['name'] = gn_target_to_blueprint_target(target, target_info)
if 'sources' in target_info:
bp['srcs'] = gn_sources_to_blueprint_sources(target_info['sources'])
(bp['static_libs'], bp['shared_libs'], bp['defaults'], bp['generated_headers'],
bp['header_libs']) = gn_deps_to_blueprint_deps(target_info, build_info)
bp['shared_libs'] += gn_libs_to_blueprint_shared_libraries(target_info)
bp['local_include_dirs'] = gn_include_dirs_to_blueprint_include_dirs(target_info)
bp['cflags'] = gn_cflags_to_blueprint_cflags(target_info)
bp['arch'] = gn_arch_specific_to_blueprint(target_info)
bp['sdk_version'] = sdk_version
bp['stl'] = stl
return (blueprint_type, bp)
def gn_action_args_to_blueprint_args(blueprint_inputs, blueprint_outputs, args):
# TODO: pass the gn gen folder as an arg so we know how to get from the gen path to the root
# path. b/150457277
remap_folders = [
('../../', ''),
('gen/', ''),
]
result_args = []
for arg in args:
# Attempt to find if this arg is a path to one of the inputs. If it is, use the blueprint
# $(location <path>) argument instead so the path gets remapped properly to the location
# that the script is run from
remapped_path_arg = arg
for (remap_source, remap_dest) in remap_folders:
remapped_path_arg = remapped_path_arg.replace(remap_source, remap_dest)
if remapped_path_arg in blueprint_inputs or remapped_path_arg in blueprint_outputs:
result_args.append('$(location %s)' % remapped_path_arg)
elif os.path.basename(remapped_path_arg) in blueprint_outputs:
result_args.append('$(location %s)' % os.path.basename(remapped_path_arg))
else:
result_args.append(remapped_path_arg)
return result_args
blueprint_gen_types = {
"action": "cc_genrule",
}
def action_target_to_blueprint(target, build_info):
target_info = build_info[target]
blueprint_type = blueprint_gen_types[target_info['type']]
bp = {}
bp['name'] = gn_target_to_blueprint_target(target, target_info)
# Blueprints use only one 'srcs', merge all gn inputs into one list.
gn_inputs = []
if 'inputs' in target_info:
gn_inputs += target_info['inputs']
if 'sources' in target_info:
gn_inputs += target_info['sources']
bp_srcs = gn_paths_to_blueprint_paths(gn_inputs)
bp['srcs'] = bp_srcs
# genrules generate the output right into the 'root' directory. Strip any path before the
# file name.
bp_outputs = []
for gn_output in target_info['outputs']:
bp_outputs.append(os.path.basename(gn_output))
bp['out'] = bp_outputs
bp['tool_files'] = [gn_path_to_blueprint_path(target_info['script'])]
# Generate the full command, $(location) refers to tool_files[0], the script
cmd = ['$(location)'] + gn_action_args_to_blueprint_args(bp_srcs, bp_outputs,
target_info['args'])
bp['cmd'] = ' '.join(cmd)
bp['sdk_version'] = sdk_version
return (blueprint_type, bp)
def gn_target_to_blueprint(target, build_info):
gn_type = build_info[target]['type']
if gn_type in blueprint_library_target_types:
return library_target_to_blueprint(target, build_info)
elif gn_type in blueprint_gen_types:
return action_target_to_blueprint(target, build_info)
else:
raise RuntimeError("Unknown gn target type: " + gn_type)
def get_gn_target_dependencies(output_dependencies, build_info, target):
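    # Depth-first walk of the gn dependency graph. Each dependency is inserted in front of its
    # dependent, so the resulting list ends up ordered with leaf dependencies first.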
output_dependencies.insert(0, target)
for dep in build_info[target]['deps']:
if dep in target_blackist:
# Blacklisted dep
continue
if dep in output_dependencies:
# Already added this dep
continue
if not dep in build_info:
# No info for this dep, skip it
continue
# Recurse
get_gn_target_dependencies(output_dependencies, build_info, dep)
def main():
parser = argparse.ArgumentParser(
description='Generate Android blueprints from gn descriptions.')
parser.add_argument(
'gn_json',
help='gn desc in json format. Generated with \'gn desc <out_dir> --format=json "*"\'.')
args = parser.parse_args()
with open(args.gn_json, 'r') as f:
build_info = json.load(f)
targets_to_write = []
for root_target in root_targets:
get_gn_target_dependencies(targets_to_write, build_info, root_target)
blueprint_targets = []
for target in targets_to_write:
blueprint_targets.append(gn_target_to_blueprint(target, build_info))
# Add APKs with all of the root libraries
blueprint_targets.append(('filegroup', {
'name': 'ANGLE_srcs',
'srcs': ['src/**/*.java',],
}))
blueprint_targets.append((
'java_defaults',
{
'name':
'ANGLE_java_defaults',
'sdk_version':
'system_current',
'min_sdk_version':
sdk_version,
'compile_multilib':
'both',
'use_embedded_native_libs':
True,
'jni_libs': [
gn_target_to_blueprint_target(target, build_info[target])
for target in root_targets
],
'aaptflags': [
# Don't compress *.json files
'-0 .json',
# Give com.android.angle.common Java files access to the R class
'--extra-packages com.android.angle.common',
],
'srcs': [':ANGLE_srcs'],
'plugins': ['java_api_finder',],
'privileged':
True,
'owner':
'google',
}))
blueprint_targets.append((
'android_library',
{
'name': 'ANGLE_library',
'sdk_version': 'system_current',
'min_sdk_version': sdk_version,
'resource_dirs': ['src/android_system_settings/res',],
'asset_dirs': ['src/android_system_settings/assets',],
'aaptflags': [
# Don't compress *.json files
'-0 .json',
],
'manifest': 'src/android_system_settings/src/com/android/angle/AndroidManifest.xml',
'static_libs': ['androidx.preference_preference',],
}))
blueprint_targets.append(('android_app', {
'name': 'ANGLE',
'defaults': ['ANGLE_java_defaults'],
'static_libs': ['ANGLE_library'],
'manifest': 'src/android_system_settings/src/com/android/angle/AndroidManifest.xml',
'required': ['privapp_whitelist_com.android.angle'],
}))
output = [
"""// GENERATED FILE - DO NOT EDIT.
// Generated by %s
//
// Copyright %s The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
""" % (sys.argv[0], date.today().year)
]
for (blueprint_type, blueprint_data) in blueprint_targets:
write_blueprint(output, blueprint_type, blueprint_data)
print('\n'.join(output))
if __name__ == '__main__':
sys.exit(main())
|
py | 1a52194633512ae1a25bfa0d582a4c48726fda0a | """deCONZ sensor platform tests."""
from copy import deepcopy
from homeassistant.components.deconz.const import CONF_ALLOW_CLIP_SENSOR
from homeassistant.components.deconz.gateway import get_gateway_from_config_entry
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_POWER,
STATE_UNAVAILABLE,
)
from .test_gateway import DECONZ_WEB_REQUEST, setup_deconz_integration
SENSORS = {
"1": {
"id": "Light sensor id",
"name": "Light level sensor",
"type": "ZHALightLevel",
"state": {"lightlevel": 30000, "dark": False},
"config": {"on": True, "reachable": True, "temperature": 10},
"uniqueid": "00:00:00:00:00:00:00:00-00",
},
"2": {
"id": "Presence sensor id",
"name": "Presence sensor",
"type": "ZHAPresence",
"state": {"presence": False},
"config": {},
"uniqueid": "00:00:00:00:00:00:00:01-00",
},
"3": {
"id": "Switch 1 id",
"name": "Switch 1",
"type": "ZHASwitch",
"state": {"buttonevent": 1000},
"config": {},
"uniqueid": "00:00:00:00:00:00:00:02-00",
},
"4": {
"id": "Switch 2 id",
"name": "Switch 2",
"type": "ZHASwitch",
"state": {"buttonevent": 1000},
"config": {"battery": 100},
"uniqueid": "00:00:00:00:00:00:00:03-00",
},
"5": {
"id": "Daylight sensor id",
"name": "Daylight sensor",
"type": "Daylight",
"state": {"daylight": True, "status": 130},
"config": {},
"uniqueid": "00:00:00:00:00:00:00:04-00",
},
"6": {
"id": "Power sensor id",
"name": "Power sensor",
"type": "ZHAPower",
"state": {"current": 2, "power": 6, "voltage": 3},
"config": {"reachable": True},
"uniqueid": "00:00:00:00:00:00:00:05-00",
},
"7": {
"id": "Consumption id",
"name": "Consumption sensor",
"type": "ZHAConsumption",
"state": {"consumption": 2, "power": 6},
"config": {"reachable": True},
"uniqueid": "00:00:00:00:00:00:00:06-00",
},
"8": {
"id": "CLIP light sensor id",
"name": "CLIP light level sensor",
"type": "CLIPLightLevel",
"state": {"lightlevel": 30000},
"config": {"reachable": True},
"uniqueid": "00:00:00:00:00:00:00:07-00",
},
}
async def test_no_sensors(hass, aioclient_mock):
"""Test that no sensors in deconz results in no sensor entities."""
await setup_deconz_integration(hass, aioclient_mock)
assert len(hass.states.async_all()) == 0
async def test_sensors(hass, aioclient_mock):
"""Test successful creation of sensor entities."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = deepcopy(SENSORS)
config_entry = await setup_deconz_integration(
hass, aioclient_mock, get_state_response=data
)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert len(hass.states.async_all()) == 5
light_level_sensor = hass.states.get("sensor.light_level_sensor")
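    # The raw log-scale "lightlevel" of 30000 is exposed in lux; the 999.8 below assumes the
    # usual conversion round(10 ** ((lightlevel - 1) / 10000), 1).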
assert light_level_sensor.state == "999.8"
assert light_level_sensor.attributes["device_class"] == DEVICE_CLASS_ILLUMINANCE
assert hass.states.get("sensor.presence_sensor") is None
assert hass.states.get("sensor.switch_1") is None
assert hass.states.get("sensor.switch_1_battery_level") is None
assert hass.states.get("sensor.switch_2") is None
switch_2_battery_level = hass.states.get("sensor.switch_2_battery_level")
assert switch_2_battery_level.state == "100"
assert switch_2_battery_level.attributes["device_class"] == DEVICE_CLASS_BATTERY
assert hass.states.get("sensor.daylight_sensor") is None
power_sensor = hass.states.get("sensor.power_sensor")
assert power_sensor.state == "6"
assert power_sensor.attributes["device_class"] == DEVICE_CLASS_POWER
consumption_sensor = hass.states.get("sensor.consumption_sensor")
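    # The raw consumption of 2 is presumably reported in Wh and exposed in kWh, hence "0.002".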
assert consumption_sensor.state == "0.002"
assert "device_class" not in consumption_sensor.attributes
assert hass.states.get("sensor.clip_light_level_sensor") is None
# Event signals new light level
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "1",
"state": {"lightlevel": 2000},
}
gateway.api.event_handler(state_changed_event)
assert hass.states.get("sensor.light_level_sensor").state == "1.6"
# Event signals new battery level
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "4",
"config": {"battery": 75},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("sensor.switch_2_battery_level").state == "75"
await hass.config_entries.async_unload(config_entry.entry_id)
states = hass.states.async_all()
assert len(hass.states.async_all()) == 5
for state in states:
assert state.state == STATE_UNAVAILABLE
await hass.config_entries.async_remove(config_entry.entry_id)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 0
async def test_allow_clip_sensors(hass, aioclient_mock):
"""Test that CLIP sensors can be allowed."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = deepcopy(SENSORS)
config_entry = await setup_deconz_integration(
hass,
aioclient_mock,
options={CONF_ALLOW_CLIP_SENSOR: True},
get_state_response=data,
)
assert len(hass.states.async_all()) == 6
assert hass.states.get("sensor.clip_light_level_sensor").state == "999.8"
# Disallow clip sensors
hass.config_entries.async_update_entry(
config_entry, options={CONF_ALLOW_CLIP_SENSOR: False}
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 5
assert hass.states.get("sensor.clip_light_level_sensor") is None
# Allow clip sensors
hass.config_entries.async_update_entry(
config_entry, options={CONF_ALLOW_CLIP_SENSOR: True}
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 6
assert hass.states.get("sensor.clip_light_level_sensor")
async def test_add_new_sensor(hass, aioclient_mock):
"""Test that adding a new sensor works."""
config_entry = await setup_deconz_integration(hass, aioclient_mock)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert len(hass.states.async_all()) == 0
state_added_event = {
"t": "event",
"e": "added",
"r": "sensors",
"id": "1",
"sensor": deepcopy(SENSORS["1"]),
}
gateway.api.event_handler(state_added_event)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert hass.states.get("sensor.light_level_sensor").state == "999.8"
async def test_add_battery_later(hass, aioclient_mock):
"""Test that a sensor without an initial battery state creates a battery sensor once state exist."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = {"1": deepcopy(SENSORS["3"])}
config_entry = await setup_deconz_integration(
hass, aioclient_mock, get_state_response=data
)
gateway = get_gateway_from_config_entry(hass, config_entry)
remote = gateway.api.sensors["1"]
assert len(hass.states.async_all()) == 0
assert len(gateway.events) == 1
assert len(remote._callbacks) == 2 # Event and battery tracker
remote.update({"config": {"battery": 50}})
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert len(gateway.events) == 1
assert len(remote._callbacks) == 2 # Event and battery entity
assert hass.states.get("sensor.switch_1_battery_level")
async def test_special_danfoss_battery_creation(hass, aioclient_mock):
"""Test the special Danfoss battery creation works.
Normally there should only be one battery sensor per device from deCONZ.
With specific Danfoss devices each endpoint can report its own battery state.
"""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = {
"1": {
"config": {
"battery": 70,
"heatsetpoint": 2300,
"offset": 0,
"on": True,
"reachable": True,
"schedule": {},
"schedule_on": False,
},
"ep": 1,
"etag": "982d9acc38bee5b251e24a9be26558e4",
"lastseen": "2021-02-15T12:23Z",
"manufacturername": "Danfoss",
"modelid": "0x8030",
"name": "0x8030",
"state": {
"lastupdated": "2021-02-15T12:23:07.994",
"on": False,
"temperature": 2307,
},
"swversion": "YYYYMMDD",
"type": "ZHAThermostat",
"uniqueid": "58:8e:81:ff:fe:00:11:22-01-0201",
},
"2": {
"config": {
"battery": 86,
"heatsetpoint": 2300,
"offset": 0,
"on": True,
"reachable": True,
"schedule": {},
"schedule_on": False,
},
"ep": 2,
"etag": "62f12749f9f51c950086aff37dd02b61",
"lastseen": "2021-02-15T12:23Z",
"manufacturername": "Danfoss",
"modelid": "0x8030",
"name": "0x8030",
"state": {
"lastupdated": "2021-02-15T12:23:22.399",
"on": False,
"temperature": 2316,
},
"swversion": "YYYYMMDD",
"type": "ZHAThermostat",
"uniqueid": "58:8e:81:ff:fe:00:11:22-02-0201",
},
"3": {
"config": {
"battery": 86,
"heatsetpoint": 2350,
"offset": 0,
"on": True,
"reachable": True,
"schedule": {},
"schedule_on": False,
},
"ep": 3,
"etag": "f50061174bb7f18a3d95789bab8b646d",
"lastseen": "2021-02-15T12:23Z",
"manufacturername": "Danfoss",
"modelid": "0x8030",
"name": "0x8030",
"state": {
"lastupdated": "2021-02-15T12:23:25.466",
"on": False,
"temperature": 2337,
},
"swversion": "YYYYMMDD",
"type": "ZHAThermostat",
"uniqueid": "58:8e:81:ff:fe:00:11:22-03-0201",
},
"4": {
"config": {
"battery": 85,
"heatsetpoint": 2300,
"offset": 0,
"on": True,
"reachable": True,
"schedule": {},
"schedule_on": False,
},
"ep": 4,
"etag": "eea97adf8ce1b971b8b6a3a31793f96b",
"lastseen": "2021-02-15T12:23Z",
"manufacturername": "Danfoss",
"modelid": "0x8030",
"name": "0x8030",
"state": {
"lastupdated": "2021-02-15T12:23:41.939",
"on": False,
"temperature": 2333,
},
"swversion": "YYYYMMDD",
"type": "ZHAThermostat",
"uniqueid": "58:8e:81:ff:fe:00:11:22-04-0201",
},
"5": {
"config": {
"battery": 83,
"heatsetpoint": 2300,
"offset": 0,
"on": True,
"reachable": True,
"schedule": {},
"schedule_on": False,
},
"ep": 5,
"etag": "1f7cd1a5d66dc27ac5eb44b8c47362fb",
"lastseen": "2021-02-15T12:23Z",
"manufacturername": "Danfoss",
"modelid": "0x8030",
"name": "0x8030",
"state": {"lastupdated": "none", "on": False, "temperature": 2325},
"swversion": "YYYYMMDD",
"type": "ZHAThermostat",
"uniqueid": "58:8e:81:ff:fe:00:11:22-05-0201",
},
}
await setup_deconz_integration(hass, aioclient_mock, get_state_response=data)
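    # Expectation: one battery sensor per endpoint (the Danfoss special case) plus the thermostat
    # entities themselves -> 10 entities in total, 5 of them in the sensor domain.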
assert len(hass.states.async_all()) == 10
assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 5
async def test_air_quality_sensor(hass, aioclient_mock):
"""Test successful creation of air quality sensor entities."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = {
"0": {
"config": {"on": True, "reachable": True},
"ep": 2,
"etag": "c2d2e42396f7c78e11e46c66e2ec0200",
"lastseen": "2020-11-20T22:48Z",
"manufacturername": "BOSCH",
"modelid": "AIR",
"name": "Air quality",
"state": {
"airquality": "poor",
"airqualityppb": 809,
"lastupdated": "2020-11-20T22:48:00.209",
},
"swversion": "20200402",
"type": "ZHAAirQuality",
"uniqueid": "00:12:4b:00:14:4d:00:07-02-fdef",
}
}
await setup_deconz_integration(hass, aioclient_mock, get_state_response=data)
assert len(hass.states.async_all()) == 1
air_quality = hass.states.get("sensor.air_quality")
assert air_quality.state == "poor"
async def test_time_sensor(hass, aioclient_mock):
"""Test successful creation of time sensor entities."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = {
"0": {
"config": {"battery": 40, "on": True, "reachable": True},
"ep": 1,
"etag": "28e796678d9a24712feef59294343bb6",
"lastseen": "2020-11-22T11:26Z",
"manufacturername": "Danfoss",
"modelid": "eTRV0100",
"name": "Time",
"state": {
"lastset": "2020-11-19T08:07:08Z",
"lastupdated": "2020-11-22T10:51:03.444",
"localtime": "2020-11-22T10:51:01",
"utc": "2020-11-22T10:51:01Z",
},
"swversion": "20200429",
"type": "ZHATime",
"uniqueid": "cc:cc:cc:ff:fe:38:4d:b3-01-000a",
}
}
await setup_deconz_integration(hass, aioclient_mock, get_state_response=data)
assert len(hass.states.async_all()) == 2
time = hass.states.get("sensor.time")
assert time.state == "2020-11-19T08:07:08Z"
time_battery = hass.states.get("sensor.time_battery_level")
assert time_battery.state == "40"
async def test_unsupported_sensor(hass, aioclient_mock):
"""Test that unsupported sensors doesn't break anything."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = {
"0": {"type": "not supported", "name": "name", "state": {}, "config": {}}
}
await setup_deconz_integration(hass, aioclient_mock, get_state_response=data)
assert len(hass.states.async_all()) == 1
unsupported_sensor = hass.states.get("sensor.name")
assert unsupported_sensor.state == "unknown"
|
py | 1a521c763bbe86f3260f67abf69107699e82880b | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.north.plugins.storage.storage_base import StorageBase
from calvin.utilities import calvinlogger
from calvin.utilities import calvinconfig
from calvin.utilities.calvin_callback import CalvinCB
from calvin.utilities import calvinuuid
_conf = calvinconfig.get()
_log = calvinlogger.get_logger(__name__)
class StorageProxy(StorageBase):
""" Implements a storage that asks a master node, this is the client class"""
def __init__(self, node):
self.master_uri = _conf.get(None, 'storage_proxy')
self.node = node
self.tunnel = None
self.replies = {}
_log.debug("PROXY init for %s", self.master_uri)
super(StorageProxy, self).__init__()
def start(self, iface='', network='', bootstrap=[], cb=None, name=None):
"""
            Starts the service if it's needed for the storage service
cb is the callback called when the start is finished
"""
_log.debug("PROXY start")
self.node.network.join([self.master_uri], CalvinCB(self._start_link_cb, org_cb=cb))
def _start_link_cb(self, status, uri, peer_node_id, org_cb):
_log.analyze(self.node.id, "+", {'status': str(status)}, peer_node_id=peer_node_id)
if status == "NACK":
if org_cb:
org_cb(False)
return
# Got link set up tunnel
self.master_id = peer_node_id
self.tunnel = self.node.proto.tunnel_new(self.master_id, 'storage', {})
self.tunnel.register_tunnel_down(CalvinCB(self.tunnel_down, org_cb=org_cb))
self.tunnel.register_tunnel_up(CalvinCB(self.tunnel_up, org_cb=org_cb))
self.tunnel.register_recv(self.tunnel_recv_handler)
def tunnel_down(self, org_cb):
""" Callback that the tunnel is not accepted or is going down """
if not self.tunnel:
return True
_log.analyze(self.node.id, "+ CLIENT", {'tunnel_id': self.tunnel.id})
self.tunnel = None
# FIXME assumes that the org_cb is the callback given by storage when starting, can only be called once
# not future up/down
if org_cb:
org_cb(False)
# We should always return True which sends an ACK on the destruction of the tunnel
return True
def tunnel_up(self, org_cb):
""" Callback that the tunnel is working """
if not self.tunnel:
return True
_log.analyze(self.node.id, "+ CLIENT", {'tunnel_id': self.tunnel.id})
# FIXME assumes that the org_cb is the callback given by storage when starting, can only be called once
# not future up/down
if org_cb:
org_cb(True)
# We should always return True which sends an ACK on the destruction of the tunnel
return True
def tunnel_recv_handler(self, payload):
""" Gets called when a storage master replies"""
_log.analyze(self.node.id, "+ CLIENT", {'payload': payload})
if 'msg_uuid' in payload and payload['msg_uuid'] in self.replies and 'cmd' in payload and payload['cmd']=='REPLY':
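            # Pop the stored callback for this msg_uuid and call it with only the 'key'/'value'
            # fields of the reply.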
self.replies.pop(payload['msg_uuid'])(**{k: v for k, v in payload.iteritems() if k in ('key', 'value')})
def send(self, cmd, msg, cb):
msg_id = calvinuuid.uuid("MSGID")
self.replies[msg_id] = cb
msg['msg_uuid'] = msg_id
self.tunnel.send(dict(msg, cmd=cmd, msg_uuid=msg_id))
def set(self, key, value, cb=None):
"""
Set a key, value pair in the storage
"""
_log.analyze(self.node.id, "+ CLIENT", {'key': key, 'value': value})
self.send(cmd='SET',msg={'key':key, 'value': value}, cb=cb)
def get(self, key, cb=None):
"""
Gets a value from the storage
"""
_log.analyze(self.node.id, "+ CLIENT", {'key': key})
self.send(cmd='GET',msg={'key':key}, cb=cb)
def get_concat(self, key, cb=None):
"""
Gets a value from the storage
"""
_log.analyze(self.node.id, "+ CLIENT", {'key': key})
self.send(cmd='GET_CONCAT',msg={'key':key}, cb=cb)
def append(self, key, value, cb=None):
_log.analyze(self.node.id, "+ CLIENT", {'key': key, 'value': value})
self.send(cmd='APPEND',msg={'key':key, 'value': value}, cb=cb)
def remove(self, key, value, cb=None):
_log.analyze(self.node.id, "+ CLIENT", {'key': key, 'value': value})
self.send(cmd='REMOVE',msg={'key':key, 'value': value}, cb=cb)
def bootstrap(self, addrs, cb=None):
_log.analyze(self.node.id, "+ CLIENT", None)
def stop(self, cb=None):
_log.analyze(self.node.id, "+ CLIENT", None)
if cb:
cb() |
py | 1a521c973b7353c5da03ec2f2e6466275592fb06 | import re
from datetime import datetime
from aiogram import types
from aiogram.dispatcher import FSMContext
from aiogram.utils.exceptions import MessageNotModified
from data import config
from data.config import LOCAL_TZ
from keyboards.inline import events, day, back_callback, delete_callback, create_callback, continue_callback
from keyboards.inline.admin import edit_subgroups, event_operations, cancel, event_type, cancel_or_delete, \
cancel_or_create, continue_or_cancel
from loader import dp, bot
from models import Admin, Event, User
from states.admin import AdminStates
from states.admin.create_event import CreateEventStates
from states.admin.edit_event import EditEventStates
from utils.misc import get_current_admin, get_current_user
from middlewares import _
def get_event_template(event):
date = event.event_over.strftime("%d.%m.%Y") if event.event_over else "-"
return """
Название: [{}]({})
Тип: {}
Дата: {}
Время: {}
""".format(event.title, event.link, config.TYPE_EVENT.get(event.type), date, event.time.strftime("%H:%M"))
@get_current_admin()
@dp.callback_query_handler(back_callback.filter(category='lang'),
state=EditEventStates.all_states)
async def back_to_choose_subgroup(callback: types.CallbackQuery, state: FSMContext, admin: Admin):
await callback.answer("")
await AdminStates.events.set()
await admin.fetch_related('group')
keyboard = await edit_subgroups.get_keyboard(admin.group.id, editable=False, for_events=True)
await callback.message.edit_text(_('Выбери подгруппу:'), reply_markup=keyboard)
@get_current_admin()
@dp.callback_query_handler(back_callback.filter(category='subgroup'),
state=EditEventStates.all_states)
async def back_to_choose_day(callback: types.CallbackQuery, state: FSMContext, admin: Admin):
await callback.answer("")
await callback.message.edit_text(_("Выбери день:"), reply_markup=day.keyboard)
await EditEventStates.day.set()
@get_current_admin()
@dp.callback_query_handler(back_callback.filter(category='event'),
state=EditEventStates.all_states)
async def back_to_choose_event(callback: types.CallbackQuery, state: FSMContext, admin: Admin):
await callback.answer("")
data = await state.get_data()
await admin.fetch_related("group")
keyboard = await events.get_keyboard(day=data.get('day'), subgroup_id=data.get('subgroup_id'), editable=True,
group_id=admin.group.id)
await callback.message.edit_text(_("Выбери событие или создай новое:"), reply_markup=keyboard)
await EditEventStates.event.set()
@get_current_admin()
@dp.callback_query_handler(state=AdminStates.events)
async def entry_manage_events(callback: types.CallbackQuery, state: FSMContext, admin: Admin):
await callback.answer("")
if callback.data == "all-events":
await state.update_data(subgroup_id=None)
await callback.message.edit_text(_("Выбери день:"), reply_markup=day.keyboard)
elif callback.data.startswith('subgroup-'):
subgroup_id = callback.data.split('-')[-1]
subgroup_id = int(subgroup_id)
await callback.message.edit_text(_("Выбери день:"), reply_markup=day.keyboard)
await state.update_data(subgroup_id=subgroup_id)
await EditEventStates.day.set()
@get_current_admin()
@dp.callback_query_handler(state=EditEventStates.day)
async def choose_day(callback: types.CallbackQuery, state: FSMContext, admin: Admin):
await callback.answer(_("День выбран"))
await state.update_data(day=callback.data)
data = await state.get_data()
await admin.fetch_related("group")
keyboard = await events.get_keyboard(day=callback.data, subgroup_id=data.get('subgroup_id'), editable=True,
group_id=admin.group.id)
await callback.message.edit_text(_("Выберите событие или создайте новое:"), reply_markup=keyboard)
await EditEventStates.event.set()
@dp.callback_query_handler(state=EditEventStates.event)
async def choose_event(callback: types.CallbackQuery, state: FSMContext):
await callback.answer(_("Событие выбрано"))
if callback.data.startswith('event-'):
event_id = callback.data.split('-')[-1]
event_id = int(event_id)
await state.update_data(event_id=event_id)
event = await Event.get(id=event_id)
await callback.message.edit_text(get_event_template(event),
parse_mode="Markdown", reply_markup=event_operations.keyboard,
disable_web_page_preview=True)
await EditEventStates.operation.set()
elif callback.data == 'add-event':
await callback.message.edit_text(_("Выбери тип события:"), reply_markup=event_type.keyboard)
await CreateEventStates.type.set()
@dp.callback_query_handler(back_callback.filter(category='cancel'),
state=EditEventStates.all_states)
async def back_choose_day(callback: types.CallbackQuery, state: FSMContext):
await callback.answer(_("Ты вернулся назад"))
data = await state.get_data()
event = await Event.get(id=data.get('event_id'))
await callback.message.edit_text(get_event_template(event),
parse_mode="Markdown", reply_markup=event_operations.keyboard,
disable_web_page_preview=True)
await EditEventStates.operation.set()
@get_current_admin()
@dp.callback_query_handler(back_callback.filter(category='cancel'),
state=CreateEventStates.all_states)
async def back_choose_event(callback: types.CallbackQuery, state: FSMContext, admin: Admin):
await callback.answer(_("Ты вернулся назад"))
data = await state.get_data()
await admin.fetch_related("group")
keyboard = await events.get_keyboard(day=data.get('day'), subgroup_id=data.get('subgroup_id'), editable=True,
group_id=admin.group.id)
await callback.message.edit_text(_("Выбери событие или создай новое:"), reply_markup=keyboard)
await EditEventStates.event.set()
@get_current_admin()
@dp.callback_query_handler(delete_callback.filter(category='event'), state=EditEventStates.operation)
async def delete_event(callback: types.CallbackQuery, state: FSMContext, admin: Admin):
await callback.answer(_("Событие удалено"))
data = await state.get_data()
event = await Event.get(id=data.get('event_id'))
if event:
await event.delete()
await admin.fetch_related("group")
keyboard = await events.get_keyboard(day=data.get('day'), subgroup_id=data.get('subgroup_id'), editable=True,
group_id=admin.group.id)
await callback.message.edit_text(_("Выберите событие или создайте новое:"), reply_markup=keyboard)
@dp.callback_query_handler(state=EditEventStates.operation)
async def choose_operation(callback: types.CallbackQuery, state: FSMContext):
await callback.answer("Выбрано событие")
if callback.data == 'edit-title':
await callback.message.edit_text(_("Укажи новое название события:"), reply_markup=cancel.keyboard)
await EditEventStates.title.set()
elif callback.data == 'edit-type':
await callback.message.edit_text(_("Выбери тип события:"), reply_markup=event_type.keyboard)
await EditEventStates.type.set()
elif callback.data == 'edit-date':
await callback.message.edit_text(_("Выбери новую дату события:"), reply_markup=cancel.keyboard)
await EditEventStates.over.set()
elif callback.data == 'edit-link':
await callback.message.edit_text(_("Введи новую ссылку:"), reply_markup=cancel.keyboard)
await EditEventStates.link.set()
elif callback.data == 'edit-time':
await callback.message.edit_text(_("Напиши время начала пары:"), reply_markup=cancel.keyboard)
await EditEventStates.time.set()
elif callback.data == 'delete':
data = await state.get_data()
event = await Event.get(id=data.get('event_id'))
keyboard = await cancel_or_delete.get_keyboard('event')
await callback.message.edit_text(_('Уверен, что хочешь удалить "{}"?'.format(event.title)),
reply_markup=keyboard, disable_web_page_preview=True)
@get_current_admin()
@get_current_user()
@dp.message_handler(state=EditEventStates.time)
async def change_time(msg: types.Message, state: FSMContext, user: User, admin: Admin):
data = await state.get_data()
event = await Event.get(id=data.get('event_id'))
await msg.delete()
try:
hour, minute = map(int, msg.text.split(':'))
if event:
event.time = datetime(year=1991, month=8, day=24, hour=hour, minute=minute)
await event.save()
await admin.fetch_related("group")
await bot.edit_message_text(get_event_template(event),
parse_mode="Markdown", reply_markup=event_operations.keyboard, chat_id=user.tele_id,
message_id=data.get('current_msg'), disable_web_page_preview=True)
await EditEventStates.operation.set()
except ValueError:
await bot.edit_message_text(_("Неправильный формат или время!"), user.tele_id,
data.get('current_msg'), reply_markup=cancel.keyboard)
@get_current_user()
@dp.message_handler(state=EditEventStates.title)
async def change_title(msg: types.Message, state: FSMContext, user: User):
data = await state.get_data()
event = await Event.get(id=data.get('event_id'))
await msg.delete()
if event:
event.title = msg.text
await event.save()
await bot.edit_message_text(get_event_template(event),
parse_mode="Markdown", reply_markup=event_operations.keyboard, chat_id=user.tele_id,
message_id=data.get('current_msg'), disable_web_page_preview=True)
await EditEventStates.operation.set()
@get_current_user()
@dp.message_handler(state=EditEventStates.link)
async def change_link(msg: types.Message, state: FSMContext, user: User):
data = await state.get_data()
event = await Event.get(id=data.get('event_id'))
await msg.delete()
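    # Accept only http(s)/www-style URLs; the same pattern is used when a new event link is entered.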
if re.match(
'(https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]+\.[^\s]{2,}|www\.[a-zA-Z0-9]+\.[^\s]{2,})',
msg.text):
if event:
event.link = msg.text
await event.save()
await bot.edit_message_text(get_event_template(event),
parse_mode="Markdown", reply_markup=event_operations.keyboard, chat_id=user.tele_id,
message_id=data.get('current_msg'), disable_web_page_preview=True)
await EditEventStates.operation.set()
else:
try:
await bot.edit_message_text(_("Неправильная ссылка!"), reply_markup=cancel.keyboard, chat_id=user.tele_id,
message_id=data.get('current_msg'))
except MessageNotModified:
pass
@get_current_user()
@dp.message_handler(state=EditEventStates.over)
async def change_date(msg: types.Message, state: FSMContext, user: User):
data = await state.get_data()
event = await Event.get(id=data.get('event_id'))
await msg.delete()
    try:
        # Parse inside the try block so a malformed date is reported to the user below.
        day, month, year = map(int, msg.text.split('.'))
        date_over = datetime(year, month, day)
timestamp_now = LOCAL_TZ.localize(datetime.now())
timestamp = LOCAL_TZ.localize(
datetime(timestamp_now.year, timestamp_now.month, timestamp_now.day))
if config.LOCAL_TZ.localize(date_over) >= timestamp:
if event:
event.event_over = date_over
await event.save()
date = date_over.strftime("%A, %d.%m.%Y") if date_over else "-"
await bot.edit_message_text(get_event_template(event),
parse_mode="Markdown", reply_markup=event_operations.keyboard,
chat_id=user.tele_id,
message_id=data.get('current_msg'), disable_web_page_preview=True)
await EditEventStates.operation.set()
else:
try:
await bot.edit_message_text(
_('Дата указывает на прошлое..'), user.tele_id,
message_id=data.get('current_msg'))
except MessageNotModified:
pass
except ValueError:
try:
await bot.edit_message_text(
_('Дата указана не корректно..'), user.tele_id,
message_id=data.get('current_msg'))
except MessageNotModified:
pass
@get_current_user()
@dp.callback_query_handler(state=EditEventStates.type)
async def change_type(callback: types.CallbackQuery, state: FSMContext, user: User):
await callback.answer(_("Выбрано событие:"))
data = await state.get_data()
event = await Event.get(id=data.get('event_id'))
if event:
event.type = callback.data
await event.save()
await callback.message.edit_text(get_event_template(event),
parse_mode="Markdown", reply_markup=event_operations.keyboard,
disable_web_page_preview=True)
await EditEventStates.operation.set()
@dp.callback_query_handler(state=CreateEventStates.type)
async def get_type(callback: types.CallbackQuery, state: FSMContext):
await callback.answer(_("Тип выбран"))
await state.update_data(type_event=callback.data)
await callback.message.edit_text(_("Укажи название события:"), reply_markup=cancel.keyboard)
await CreateEventStates.title.set()
@get_current_user()
@dp.message_handler(state=CreateEventStates.title)
async def get_title(msg: types.Message, state: FSMContext, user: User):
await msg.delete()
await state.update_data(title=msg.text)
data = await state.get_data()
keyboard = await continue_or_cancel.get_keyboard('event')
await bot.edit_message_text(_("Укажи дату до которой будет повторяться событие"), reply_markup=keyboard,
chat_id=user.tele_id,
message_id=data.get('current_msg'))
await CreateEventStates.over.set()
@get_current_user()
@dp.callback_query_handler(continue_callback.filter(), state=CreateEventStates.over)
async def go_to_link(callback: types.CallbackQuery, state: FSMContext, user: User):
await callback.answer("")
data = await state.get_data()
await bot.edit_message_text(_("Введи ссылку на событие:"), reply_markup=cancel.keyboard,
chat_id=user.tele_id,
message_id=data.get('current_msg'))
await CreateEventStates.link.set()
@get_current_user()
@dp.message_handler(state=CreateEventStates.over)
async def get_event_over(msg: types.Message, state: FSMContext, user: User):
await msg.delete()
data = await state.get_data()
    try:
        # Parse inside the try block so a malformed date is reported to the user below.
        day, month, year = map(int, msg.text.split('.'))
        date_over = datetime(year, month, day)
timestamp_now = LOCAL_TZ.localize(datetime.now())
timestamp = LOCAL_TZ.localize(
datetime(timestamp_now.year, timestamp_now.month, timestamp_now.day))
if config.LOCAL_TZ.localize(date_over) >= timestamp:
await state.update_data(event_over=date_over)
await CreateEventStates.link.set()
await bot.edit_message_text(_("Введи ссылку на событие:"), reply_markup=cancel.keyboard,
chat_id=user.tele_id,
message_id=data.get('current_msg'))
else:
try:
await bot.edit_message_text(
_('Дата указывает на прошлое..'), user.tele_id,
message_id=data.get('current_msg'), reply_markup=cancel.keyboard)
except MessageNotModified:
pass
except ValueError:
try:
await bot.edit_message_text(
_('Дата указана неправильно'), user.tele_id,
message_id=data.get('current_msg'), reply_markup=cancel.keyboard)
except MessageNotModified:
pass
@get_current_user()
@get_current_admin()
@dp.callback_query_handler(create_callback.filter(category='event'), state=CreateEventStates.event)
async def create_event(callback: types.CallbackQuery, state: FSMContext, user: User, admin: Admin):
await callback.answer(_("Событие создано"))
data = await state.get_data()
await admin.fetch_related("group")
await Event.create(title=data.get('title'), day=data.get('day'), type=data.get('type_event'),
link=data.get('link'), event_over=data.get('event_over'), group_id=admin.group.id,
subgroup_id=data.get('subgroup_id'), time=data.get('time'))
keyboard = await events.get_keyboard(day=data.get('day'), subgroup_id=data.get('subgroup_id'), editable=True,
group_id=admin.group.id)
await callback.message.edit_text(_("Выбери событие или создай новое:"), reply_markup=keyboard)
await EditEventStates.event.set()
@get_current_admin()
@get_current_user()
@dp.message_handler(state=CreateEventStates.time)
async def set_time(msg: types.Message, state: FSMContext, user: User, admin: Admin):
data = await state.get_data()
await msg.delete()
try:
hour, minute = map(int, msg.text.split(':'))
time = datetime(year=1991, month=8, day=24, hour=hour, minute=minute)
await state.update_data(time=time)
await admin.fetch_related("group")
keyboard = await cancel_or_create.get_keyboard('event')
date = data.get('event_over').strftime("%A, %d.%m.%Y") if data.get('event_over') else "-"
await bot.edit_message_text(
'''
Название: [{}]({})
Тип: {}
Дата: {}
Время: {}
'''.format(data.get('title'), data.get('link'), config.TYPE_EVENT[data.get('type_event')], date,
time.strftime("%H:%M")),
user.tele_id,
message_id=data.get('current_msg'), reply_markup=keyboard, parse_mode="Markdown")
await CreateEventStates.event.set()
except ValueError:
await bot.edit_message_text(_("Неправильный формат или время!"), user.tele_id,
data.get('current_msg'), reply_markup=cancel.keyboard)
@get_current_user()
@dp.message_handler(state=CreateEventStates.link)
async def get_link(msg: types.Message, state: FSMContext, user: User):
await msg.delete()
data = await state.get_data()
if re.match(
'(https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]+\.[^\s]{2,}|www\.[a-zA-Z0-9]+\.[^\s]{2,})',
msg.text):
await state.update_data(link=msg.text)
await bot.edit_message_text(_("Укажи время начала события:"),
user.tele_id,
message_id=data.get('current_msg'), reply_markup=cancel.keyboard,
parse_mode="Markdown")
await CreateEventStates.time.set()
else:
try:
await bot.edit_message_text(
_('Неправильная ссылка'), user.tele_id,
message_id=data.get('current_msg'), reply_markup=cancel.keyboard)
except MessageNotModified:
pass
@dp.message_handler(state=AdminStates.events)
async def clear(msg: types.Message):
await msg.delete()
|
py | 1a521d6a4adcc1e2a535c2413056e63d63847f07 | # Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For further info, check https://github.com/canonical/charmcraft
"""Tests for the commands infraestructure."""
import pytest
from charmcraft.cmdbase import CommandError, BaseCommand
from charmcraft.main import COMMAND_GROUPS
def test_commanderror_retcode_default():
"""The CommandError return code default."""
err = CommandError('problem')
assert err.retcode == 1
def test_commanderror_retcode_given():
"""The CommandError holds the return code."""
err = CommandError('problem', retcode=4)
assert err.retcode == 4
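# Flatten the per-group command lists so the parametrized tests below cover every command.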
all_commands = [command for _, _, commands in COMMAND_GROUPS for command in commands]
@pytest.mark.parametrize('command', all_commands)
@pytest.mark.parametrize('attrib', ['name', 'help_msg', 'overview'])
def test_basecommand_mandatory_attributes(command, attrib):
"""All commands must provide the mandatory attributes."""
assert getattr(command, attrib) is not None
def test_basecommand_holds_the_indicated_group():
"""BaseCommand subclasses ."""
class TestClass(BaseCommand):
help_msg = 'help message'
name = 'test'
group = 'test group'
tc = TestClass(group)
assert tc.group == group
def test_basecommand_fill_parser_optional():
"""BaseCommand subclasses are allowed to not override fill_parser."""
class TestClass(BaseCommand):
help_msg = 'help message'
name = 'test'
def __init__(self, group):
self.done = False
super().__init__(group)
def run(self, parsed_args):
self.done = True
tc = TestClass('group')
tc.run([])
assert tc.done
def test_basecommand_run_mandatory():
"""BaseCommand subclasses must override run."""
class TestClass(BaseCommand):
help_msg = 'help message'
name = 'test'
tc = TestClass('group')
with pytest.raises(NotImplementedError):
tc.run([])
|
py | 1a521d8a084c2eda94477a0d606ff747c1d862a7 | # (c) Copyright [2018-2021] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# |_ |~) _ _| _ /~\ _ |.
# |_)\/ |_)(_|(_|| \_/|_|(_|||
# /
# ____________ ______
# / __ `\ / /
# | \/ / / /
# |______ / / /
# |____/ / /
# _____________ / /
# \ / / /
# \ / / /
# \_______/ / /
# ______ / /
# \ / / /
# \ / / /
# \/ / /
# / /
# / /
# \ /
# \ /
# \/
# _
# \ / _ __|_. _ _ |_)
# \/ (/_| | |(_(_|| \/
# /
# VerticaPy is a Python library with scikit-like functionality for conducting
# data science projects on data stored in Vertica, taking advantage of Vertica’s
# speed and built-in analytics and machine learning features. It supports the
# entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize
# data transformation operations, and offers beautiful graphical options.
#
# VerticaPy aims to solve all of these problems. The idea is simple: instead
# of moving data around for processing, VerticaPy brings the logic to the data.
#
#
# Modules
#
# Standard Python Modules
import math, decimal, datetime
from typing import Union
# Other Python Modules
from scipy.stats import chi2, norm, f
import numpy as np
# VerticaPy Modules
import verticapy
from verticapy.utilities import *
from verticapy.toolbox import *
from verticapy.learn.linear_model import LinearRegression
from verticapy import vDataFrame
# Statistical Tests & Tools
# ---#
def adfuller(
vdf: vDataFrame,
column: str,
ts: str,
by: list = [],
p: int = 1,
with_trend: bool = False,
regresults: bool = False,
):
"""
---------------------------------------------------------------------------
Augmented Dickey Fuller test (Time Series stationarity).
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
column: str
Input vcolumn to test.
ts: str
        vcolumn used as timeline. It will be used to order the data. It can be
        a numerical vcolumn or a date-like vcolumn (date, datetime, timestamp...).
by: list, optional
vcolumns used in the partition.
p: int, optional
Number of lags to consider in the test.
with_trend: bool, optional
Adds a trend in the Regression.
regresults: bool, optional
If True, the full regression results are returned.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
def critical_value(alpha, N, with_trend):
if not (with_trend):
if N <= 25:
if alpha == 0.01:
return -3.75
elif alpha == 0.10:
return -2.62
elif alpha == 0.025:
return -3.33
else:
return -3.00
elif N <= 50:
if alpha == 0.01:
return -3.58
elif alpha == 0.10:
return -2.60
elif alpha == 0.025:
return -3.22
else:
return -2.93
elif N <= 100:
if alpha == 0.01:
return -3.51
elif alpha == 0.10:
return -2.58
elif alpha == 0.025:
return -3.17
else:
return -2.89
elif N <= 250:
if alpha == 0.01:
return -3.46
elif alpha == 0.10:
return -2.57
elif alpha == 0.025:
return -3.14
else:
return -2.88
elif N <= 500:
if alpha == 0.01:
return -3.44
elif alpha == 0.10:
return -2.57
elif alpha == 0.025:
return -3.13
else:
return -2.87
else:
if alpha == 0.01:
return -3.43
elif alpha == 0.10:
return -2.57
elif alpha == 0.025:
return -3.12
else:
return -2.86
else:
if N <= 25:
if alpha == 0.01:
return -4.38
elif alpha == 0.10:
return -3.24
elif alpha == 0.025:
return -3.95
else:
return -3.60
elif N <= 50:
if alpha == 0.01:
return -4.15
elif alpha == 0.10:
return -3.18
elif alpha == 0.025:
return -3.80
else:
return -3.50
elif N <= 100:
if alpha == 0.01:
return -4.04
elif alpha == 0.10:
return -3.15
elif alpha == 0.025:
return -3.73
else:
                    return -3.45
elif N <= 250:
if alpha == 0.01:
return -3.99
elif alpha == 0.10:
return -3.13
elif alpha == 0.025:
return -3.69
else:
return -3.43
elif N <= 500:
if alpha == 0.01:
                    return -3.98
elif alpha == 0.10:
return -3.13
elif alpha == 0.025:
return -3.68
else:
return -3.42
else:
if alpha == 0.01:
return -3.96
elif alpha == 0.10:
return -3.12
elif alpha == 0.025:
return -3.66
else:
return -3.41
check_types(
[
("ts", ts, [str],),
("column", column, [str],),
("p", p, [int, float],),
("by", by, [list],),
("with_trend", with_trend, [bool],),
("regresults", regresults, [bool],),
("vdf", vdf, [vDataFrame,],),
],
)
columns_check([ts, column] + by, vdf)
ts = vdf_columns_names([ts], vdf)[0]
column = vdf_columns_names([column], vdf)[0]
by = vdf_columns_names(by, vdf)
schema = vdf._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema):
schema = "public"
name = "{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
schema, gen_name([column]).upper()
)
relation_name = "{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
schema, gen_name([column]).upper()
)
try:
vdf._VERTICAPY_VARIABLES_["cursor"].execute(
"DROP MODEL IF EXISTS {}".format(name)
)
vdf._VERTICAPY_VARIABLES_["cursor"].execute(
"DROP VIEW IF EXISTS {}".format(relation_name)
)
except:
pass
lag = [
"LAG({}, 1) OVER ({}ORDER BY {}) AS lag1".format(
column, "PARTITION BY {}".format(", ".join(by)) if (by) else "", ts
)
]
lag += [
"LAG({}, {}) OVER ({}ORDER BY {}) - LAG({}, {}) OVER ({}ORDER BY {}) AS delta{}".format(
column,
i,
"PARTITION BY {}".format(", ".join(by)) if (by) else "",
ts,
column,
i + 1,
"PARTITION BY {}".format(", ".join(by)) if (by) else "",
ts,
i,
)
for i in range(1, p + 1)
]
lag += [
"{} - LAG({}, 1) OVER ({}ORDER BY {}) AS delta".format(
column, column, "PARTITION BY {}".format(", ".join(by)) if (by) else "", ts
)
]
query = "CREATE VIEW {} AS SELECT {}, {} AS ts FROM {}".format(
relation_name,
", ".join(lag),
"TIMESTAMPDIFF(SECOND, {}, MIN({}) OVER ())".format(ts, ts)
if vdf[ts].isdate()
else ts,
vdf.__genSQL__(),
)
vdf._VERTICAPY_VARIABLES_["cursor"].execute(query)
model = LinearRegression(
name, vdf._VERTICAPY_VARIABLES_["cursor"], solver="Newton", max_iter=1000
)
predictors = ["lag1"] + ["delta{}".format(i) for i in range(1, p + 1)]
if with_trend:
predictors += ["ts"]
model.fit(
relation_name, predictors, "delta",
)
coef = model.coef_
vdf._VERTICAPY_VARIABLES_["cursor"].execute("DROP MODEL IF EXISTS {}".format(name))
vdf._VERTICAPY_VARIABLES_["cursor"].execute(
"DROP VIEW IF EXISTS {}".format(relation_name)
)
if regresults:
return coef
coef = coef.transpose()
DF = coef.values["lag1"][0] / (max(coef.values["lag1"][1], 1e-99))
p_value = coef.values["lag1"][3]
count = vdf.shape()[0]
result = tablesample(
{
"index": [
"ADF Test Statistic",
"p_value",
"# Lags used",
"# Observations Used",
"Critical Value (1%)",
"Critical Value (2.5%)",
"Critical Value (5%)",
"Critical Value (10%)",
"Stationarity (alpha = 1%)",
],
"value": [
DF,
p_value,
p,
count,
critical_value(0.01, count, with_trend),
critical_value(0.025, count, with_trend),
critical_value(0.05, count, with_trend),
critical_value(0.10, count, with_trend),
DF < critical_value(0.01, count, with_trend) and p_value < 0.01,
],
}
)
return result
# ---#
def cochrane_orcutt(
model, vdf: Union[vDataFrame, str], ts: str, prais_winsten: bool = False, drop_tmp_model: bool = True,
):
"""
---------------------------------------------------------------------------
Performs a Cochrane-Orcutt estimation.
Parameters
----------
model: vModel
Linear regression object.
vdf: vDataFrame / str
Input relation.
ts: str
vcolumn of numeric or date-like type (date, datetime, timestamp, etc.)
used as the timeline and to order the data.
prais_winsten: bool, optional
If true, retains the first observation of the time series, increasing
precision and efficiency. This configuration is called the
Prais–Winsten estimation.
drop_tmp_model: bool, optional
If true, it drops the temporary model.
Returns
-------
model
A Linear Model with the different information stored as attributes:
- coef_ : Model's coefficients.
- pho_ : Cochrane-Orcutt pho.
- anova_table_ : ANOVA table.
- r2_ : R2
"""
check_types(
[("vdf", vdf, [vDataFrame, str,],),
("ts", ts, [vDataFrame, str,],),
("drop_tmp_model", drop_tmp_model, [bool,],),],
)
if isinstance(vdf, str):
vdf_tmp = vdf_from_relation(vdf, cursor=model.cursor)
else:
vdf_tmp = vdf.copy()
columns_check([ts], vdf_tmp)
schema, relation = schema_relation(model.name)
name = schema + ".VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
get_session(model.cursor)
)
param = model.get_params()
model_tmp = type(model)(name, model.cursor)
model_tmp.set_params(param)
X, y = model.X, model.y
print_info = verticapy.options["print_info"]
verticapy.options["print_info"] = False
if prais_winsten:
vdf_tmp = vdf_tmp[X + [y, ts]].dropna()
verticapy.options["print_info"] = print_info
    prediction_name = "prediction_{}".format(get_session(vdf_tmp._VERTICAPY_VARIABLES_["cursor"]))
    eps_name = "eps_{}".format(get_session(vdf_tmp._VERTICAPY_VARIABLES_["cursor"]))
model.predict(vdf_tmp, X=X, name=prediction_name,)
vdf_tmp[eps_name] = vdf_tmp[y] - vdf_tmp[prediction_name]
query = "SELECT SUM(num) / SUM(den) FROM (SELECT {} * LAG({}) OVER (ORDER BY {}) AS num, POWER({}, 2) AS den FROM {}) x".format(eps_name, eps_name, ts, eps_name, vdf_tmp.__genSQL__())
    vdf_tmp.__executeSQL__(
query,
title="Computes the Cochrane Orcutt pho.",
)
pho = vdf_tmp._VERTICAPY_VARIABLES_["cursor"].fetchone()[0]
for elem in X + [y]:
new_val = "{} - {} * LAG({}) OVER (ORDER BY {})".format(elem, pho, elem, ts)
if prais_winsten:
new_val = "COALESCE({}, {} * {})".format(new_val, elem, (1 - pho ** 2) ** (0.5))
vdf_tmp[elem] = new_val
model_tmp.drop()
model_tmp.fit(vdf_tmp, X, y)
model_tmp.pho_ = pho
model_tmp.anova_table_ = model.regression_report("anova")
model_tmp.r2_ = model.score("r2")
if drop_tmp_model:
model_tmp.drop()
return model_tmp
# ---#
def durbin_watson(
vdf: vDataFrame, eps: str, ts: str, by: list = [],
):
"""
---------------------------------------------------------------------------
Durbin Watson test (residuals autocorrelation).
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
eps: str
Input residual vcolumn.
ts: str
        vcolumn used as timeline. It will be used to order the data. It can be
        a numerical vcolumn or a date-like vcolumn (date, datetime, timestamp...).
by: list, optional
vcolumns used in the partition.
Returns
-------
float
Durbin Watson statistic
"""
check_types(
[
("ts", ts, [str],),
("eps", eps, [str],),
("by", by, [list],),
("vdf", vdf, [vDataFrame, str,],),
],
)
columns_check([eps] + [ts] + by, vdf)
eps = vdf_columns_names([eps], vdf)[0]
ts = vdf_columns_names([ts], vdf)[0]
by = vdf_columns_names(by, vdf)
query = "(SELECT et, LAG(et) OVER({}ORDER BY {}) AS lag_et FROM (SELECT {} AS et, {}{} FROM {}) VERTICAPY_SUBTABLE) VERTICAPY_SUBTABLE".format(
"PARTITION BY {} ".format(", ".join(by)) if (by) else "",
ts,
eps,
ts,
(", " + ", ".join(by)) if (by) else "",
vdf.__genSQL__(),
)
vdf.__executeSQL__(
"SELECT SUM(POWER(et - lag_et, 2)) / SUM(POWER(et, 2)) FROM {}".format(query),
title="Computes the Durbin Watson d.",
)
d = vdf._VERTICAPY_VARIABLES_["cursor"].fetchone()[0]
return d
# ---#
def endogtest(
vdf: vDataFrame, eps: str, X: list,
):
"""
---------------------------------------------------------------------------
Endogeneity test.
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
eps: str
Input residual vcolumn.
X: list
Input Variables to test the endogeneity on.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[("eps", eps, [str],), ("X", X, [list],), ("vdf", vdf, [vDataFrame, str,],),],
)
columns_check([eps] + X, vdf)
eps = vdf_columns_names([eps], vdf)[0]
X = vdf_columns_names(X, vdf)
from verticapy.learn.linear_model import LinearRegression
schema_writing = vdf._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema_writing):
schema_writing = "public"
name = schema_writing + ".VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
get_session(vdf._VERTICAPY_VARIABLES_["cursor"])
)
model = LinearRegression(name, cursor=vdf._VERTICAPY_VARIABLES_["cursor"])
try:
model.fit(vdf, X, eps)
R2 = model.score("r2")
model.drop()
except:
try:
model.set_params({"solver": "bfgs"})
model.fit(vdf, X, eps)
R2 = model.score("r2")
model.drop()
except:
model.drop()
raise
n = vdf.shape()[0]
k = len(X)
LM = n * R2
lm_pvalue = chi2.sf(LM, k)
F = (n - k - 1) * R2 / (1 - R2) / k
f_pvalue = f.sf(F, k, n - k - 1)
result = tablesample(
{
"index": [
"Lagrange Multiplier Statistic",
"lm_p_value",
"F Value",
"f_p_value",
],
"value": [LM, lm_pvalue, F, f_pvalue],
}
)
return result
# ---#
def het_arch(
vdf: vDataFrame, eps: str, ts: str, by: list = [], p: int = 1,
):
"""
---------------------------------------------------------------------------
Engle’s Test for Autoregressive Conditional Heteroscedasticity (ARCH).
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
eps: str
Input residual vcolumn.
ts: str
    vcolumn used as timeline. It will be used to order the data. It can be
    a numerical or a date-like (date, datetime, timestamp...) vcolumn.
by: list, optional
vcolumns used in the partition.
p: int, optional
Number of lags to consider in the test.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[
("eps", eps, [str],),
("ts", ts, [str],),
("p", p, [int, float],),
("vdf", vdf, [vDataFrame, str,],),
],
)
columns_check([eps, ts] + by, vdf)
eps = vdf_columns_names([eps], vdf)[0]
ts = vdf_columns_names([ts], vdf)[0]
by = vdf_columns_names(by, vdf)
X = []
X_names = []
for i in range(0, p + 1):
X += [
"LAG(POWER({}, 2), {}) OVER({}ORDER BY {}) AS lag_{}".format(
eps, i, ("PARTITION BY " + ", ".join(by)) if (by) else "", ts, i
)
]
X_names += ["lag_{}".format(i)]
query = "(SELECT {} FROM {}) VERTICAPY_SUBTABLE".format(
", ".join(X), vdf.__genSQL__()
)
vdf_lags = vdf_from_relation(query, cursor=vdf._VERTICAPY_VARIABLES_["cursor"])
from verticapy.learn.linear_model import LinearRegression
schema_writing = vdf._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema_writing):
schema_writing = "public"
name = schema_writing + ".VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
get_session(vdf._VERTICAPY_VARIABLES_["cursor"])
)
model = LinearRegression(name, cursor=vdf._VERTICAPY_VARIABLES_["cursor"])
try:
model.fit(vdf_lags, X_names[1:], X_names[0])
R2 = model.score("r2")
model.drop()
except:
try:
model.set_params({"solver": "bfgs"})
model.fit(vdf_lags, X_names[1:], X_names[0])
R2 = model.score("r2")
model.drop()
except:
model.drop()
raise
n = vdf.shape()[0]
k = len(X)
LM = (n - p) * R2
lm_pvalue = chi2.sf(LM, p)
F = (n - 2 * p - 1) * R2 / (1 - R2) / p
f_pvalue = f.sf(F, p, n - 2 * p - 1)
result = tablesample(
{
"index": [
"Lagrange Multiplier Statistic",
"lm_p_value",
"F Value",
"f_p_value",
],
"value": [LM, lm_pvalue, F, f_pvalue],
}
)
return result
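# Illustrative usage (hypothetical column names): the auxiliary regression fits
# the squared residual on its own first `p` lags, ordered by the time column.
#
#     het_arch(vdf, eps="eps", ts="date", p=4)
#
# Small p-values indicate ARCH effects, i.e. the residual variance depends on
# past squared residuals.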
# ---#
def het_breuschpagan(
vdf: vDataFrame, eps: str, X: list,
):
"""
---------------------------------------------------------------------------
Uses the Breusch-Pagan test to check a model for heteroskedasticity.
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
eps: str
Input residual vColumn.
X: list
The exogenous variables to test.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[("eps", eps, [str],), ("X", X, [list],), ("vdf", vdf, [vDataFrame, str,],),],
)
columns_check([eps] + X, vdf)
eps = vdf_columns_names([eps], vdf)[0]
X = vdf_columns_names(X, vdf)
from verticapy.learn.linear_model import LinearRegression
schema_writing = vdf._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema_writing):
schema_writing = "public"
name = schema_writing + ".VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
get_session(vdf._VERTICAPY_VARIABLES_["cursor"])
)
model = LinearRegression(name, cursor=vdf._VERTICAPY_VARIABLES_["cursor"])
vdf_copy = vdf.copy()
vdf_copy["VERTICAPY_TEMP_eps2"] = vdf_copy[eps] ** 2
try:
model.fit(vdf_copy, X, "VERTICAPY_TEMP_eps2")
R2 = model.score("r2")
model.drop()
except:
try:
model.set_params({"solver": "bfgs"})
model.fit(vdf_copy, X, "VERTICAPY_TEMP_eps2")
R2 = model.score("r2")
model.drop()
except:
model.drop()
raise
n = vdf.shape()[0]
k = len(X)
LM = n * R2
lm_pvalue = chi2.sf(LM, k)
F = (n - k - 1) * R2 / (1 - R2) / k
f_pvalue = f.sf(F, k, n - k - 1)
result = tablesample(
{
"index": [
"Lagrange Multiplier Statistic",
"lm_p_value",
"F Value",
"f_p_value",
],
"value": [LM, lm_pvalue, F, f_pvalue],
}
)
return result
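# Illustrative usage (hypothetical column names): the auxiliary regression fits
# the squared residuals on the exogenous variables.
#
#     het_breuschpagan(vdf, eps="eps", X=["x1", "x2"])
#
# Small p-values indicate heteroskedasticity (the residual variance depends on X).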
# ---#
def het_goldfeldquandt(
vdf: vDataFrame, y: str, X: list, idx: int = 0, split: float = 0.5
):
"""
---------------------------------------------------------------------------
Goldfeld-Quandt homoscedasticity test.
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
y: str
Response Column.
X: list
Exogenous Variables.
idx: int, optional
Column index of variable according to which observations are sorted
for the split.
split: float, optional
Float to indicate where to split (Example: 0.5 to split on the median).
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
def model_fit(input_relation, X, y, model):
var = []
for vdf_tmp in input_relation:
model.drop()
model.fit(vdf_tmp, X, y)
model.predict(vdf_tmp, name="verticapy_prediction")
vdf_tmp["residual_0"] = vdf_tmp[y] - vdf_tmp["verticapy_prediction"]
var += [vdf_tmp["residual_0"].var()]
model.drop()
return var
check_types(
[
("y", y, [str],),
("X", X, [list],),
("idx", idx, [int, float],),
("split", split, [int, float],),
("vdf", vdf, [vDataFrame, str,],),
],
)
columns_check([y] + X, vdf)
y = vdf_columns_names([y], vdf)[0]
X = vdf_columns_names(X, vdf)
split_value = vdf[X[idx]].quantile(split)
vdf_0_half = vdf.search(vdf[X[idx]] < split_value)
vdf_1_half = vdf.search(vdf[X[idx]] > split_value)
from verticapy.learn.linear_model import LinearRegression
schema_writing = vdf._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema_writing):
schema_writing = "public"
name = schema_writing + ".VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
get_session(vdf._VERTICAPY_VARIABLES_["cursor"])
)
model = LinearRegression(name, cursor=vdf._VERTICAPY_VARIABLES_["cursor"])
try:
var0, var1 = model_fit([vdf_0_half, vdf_1_half], X, y, model)
except:
try:
model.set_params({"solver": "bfgs"})
var0, var1 = model_fit([vdf_0_half, vdf_1_half], X, y, model)
except:
model.drop()
raise
n, m = vdf_0_half.shape()[0], vdf_1_half.shape()[0]
F = var0 / var1
f_pvalue = f.sf(F, n, m)
result = tablesample({"index": ["F Value", "f_p_value",], "value": [F, f_pvalue],})
return result
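# Illustrative usage (hypothetical column names): the data are split on the
# chosen quantile of X[idx] and an F statistic compares the residual variances
# of the two halves.
#
#     het_goldfeldquandt(vdf, y="y", X=["x1", "x2"], idx=0, split=0.5)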
# ---#
def het_white(
vdf: vDataFrame, eps: str, X: list,
):
"""
---------------------------------------------------------------------------
White’s Lagrange Multiplier Test for heteroscedasticity.
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
eps: str
Input residual vcolumn.
X: list
Exogenous Variables to test the heteroscedasticity on.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[("eps", eps, [str],), ("X", X, [list],), ("vdf", vdf, [vDataFrame, str,],),],
)
columns_check([eps] + X, vdf)
eps = vdf_columns_names([eps], vdf)[0]
X = vdf_columns_names(X, vdf)
X_0 = ["1"] + X
variables = []
variables_names = []
for i in range(len(X_0)):
for j in range(i, len(X_0)):
if i != 0 or j != 0:
variables += ["{} * {} AS var_{}_{}".format(X_0[i], X_0[j], i, j)]
variables_names += ["var_{}_{}".format(i, j)]
query = "(SELECT {}, POWER({}, 2) AS VERTICAPY_TEMP_eps2 FROM {}) VERTICAPY_SUBTABLE".format(
", ".join(variables), eps, vdf.__genSQL__()
)
vdf_white = vdf_from_relation(query, cursor=vdf._VERTICAPY_VARIABLES_["cursor"])
from verticapy.learn.linear_model import LinearRegression
schema_writing = vdf._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema_writing):
schema_writing = "public"
name = schema_writing + ".VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
get_session(vdf._VERTICAPY_VARIABLES_["cursor"])
)
model = LinearRegression(name, cursor=vdf._VERTICAPY_VARIABLES_["cursor"])
try:
model.fit(vdf_white, variables_names, "VERTICAPY_TEMP_eps2")
R2 = model.score("r2")
model.drop()
except:
try:
model.set_params({"solver": "bfgs"})
model.fit(vdf_white, variables_names, "VERTICAPY_TEMP_eps2")
R2 = model.score("r2")
model.drop()
except:
model.drop()
raise
n = vdf.shape()[0]
if len(X) > 1:
k = 2 * len(X) + math.factorial(len(X)) / 2 / (math.factorial(len(X) - 2))
else:
k = 1
LM = n * R2
lm_pvalue = chi2.sf(LM, k)
F = (n - k - 1) * R2 / (1 - R2) / k
f_pvalue = f.sf(F, k, n - k - 1)
result = tablesample(
{
"index": [
"Lagrange Multiplier Statistic",
"lm_p_value",
"F Value",
"f_p_value",
],
"value": [LM, lm_pvalue, F, f_pvalue],
}
)
return result
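# Illustrative usage (hypothetical column names): the auxiliary regression uses
# the original regressors, their squares, and their cross products.
#
#     het_white(vdf, eps="eps", X=["x1", "x2"])
#
# Small p-values indicate heteroskedasticity of a fairly general form.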
# ---#
def jarque_bera(vdf: vDataFrame, column: str, alpha: float = 0.05):
"""
---------------------------------------------------------------------------
Jarque-Bera test (Distribution Normality).
Parameters
----------
vdf: vDataFrame
input vDataFrame.
column: str
Input vcolumn to test.
alpha: float, optional
    Significance Level. Probability of rejecting H0 when it is true.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[
("column", column, [str],),
("alpha", alpha, [int, float],),
("vdf", vdf, [vDataFrame,],),
],
)
columns_check([column], vdf)
column = vdf_columns_names([column], vdf)[0]
jb, kurtosis, skewness, n = (
vdf[column].agg(["jb", "kurtosis", "skewness", "count"]).values[column]
)
pvalue = chi2.sf(jb, 2)
result = False if pvalue < alpha else True
result = tablesample(
{
"index": [
"Jarque Bera Test Statistic",
"p_value",
"# Observations Used",
"Kurtosis - 3",
"Skewness",
"Distribution Normality",
],
"value": [jb, pvalue, n, kurtosis, skewness, result],
}
)
return result
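# Illustrative usage (hypothetical column name): the statistic combines the
# sample skewness and excess kurtosis and is compared to a chi-square law with
# 2 degrees of freedom.
#
#     jarque_bera(vdf, column="x1", alpha=0.05)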
# ---#
def kurtosistest(vdf: vDataFrame, column: str):
"""
---------------------------------------------------------------------------
Test whether the kurtosis is different from the normal distribution.
Parameters
----------
vdf: vDataFrame
input vDataFrame.
column: str
Input vcolumn to test.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types([("column", column, [str],), ("vdf", vdf, [vDataFrame,],),],)
columns_check([column], vdf)
column = vdf_columns_names([column], vdf)[0]
g2, n = vdf[column].agg(["kurtosis", "count"]).values[column]
mu1 = -6 / (n + 1)
mu2 = 24 * n * (n - 2) * (n - 3) / (((n + 1) ** 2) * (n + 3) * (n + 5))
gamma1 = (
6
* (n ** 2 - 5 * n + 2)
/ ((n + 7) * (n + 9))
* math.sqrt(6 * (n + 3) * (n + 5) / (n * (n - 2) * (n - 3)))
)
A = 6 + 8 / gamma1 * (2 / gamma1 + math.sqrt(1 + 4 / (gamma1 ** 2)))
B = (1 - 2 / A) / (1 + (g2 - mu1) / math.sqrt(mu2) * math.sqrt(2 / (A - 4)))
B = B ** (1 / 3) if B > 0 else (-B) ** (1 / 3)
Z2 = math.sqrt(9 * A / 2) * (1 - 2 / (9 * A) - B)
pvalue = 2 * norm.sf(abs(Z2))
result = tablesample({"index": ["Statistic", "p_value",], "value": [Z2, pvalue],})
return result
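# Illustrative usage (hypothetical column name): Z2 is a normal approximation
# of the sample excess kurtosis (an Anscombe-Glynn-style transformation).
#
#     kurtosistest(vdf, column="x1")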
# ---#
def ljungbox(
vdf: vDataFrame,
column: str,
ts: str,
by: list = [],
p: int = 1,
alpha: float = 0.05,
box_pierce: bool = False,
):
"""
---------------------------------------------------------------------------
Ljung–Box test (whether any of a group of autocorrelations of a time series
are different from zero).
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
column: str
Input vcolumn to test.
ts: str
    vcolumn used as timeline. It will be used to order the data. It can be
    a numerical or a date-like (date, datetime, timestamp...) vcolumn.
by: list, optional
vcolumns used in the partition.
p: int, optional
Number of lags to consider in the test.
alpha: float, optional
    Significance Level. Probability of rejecting H0 when it is true.
box_pierce: bool
If set to True, the Box-Pierce statistic will be used.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[
("ts", ts, [str],),
("column", column, [str],),
("by", by, [list],),
("p", p, [int, float],),
("alpha", alpha, [int, float],),
("box_pierce", box_pierce, [bool],),
("vdf", vdf, [vDataFrame,],),
],
)
columns_check([column] + [ts] + by, vdf)
column = vdf_columns_names([column], vdf)[0]
ts = vdf_columns_names([ts], vdf)[0]
by = vdf_columns_names(by, vdf)
acf = vdf.acf(column=column, ts=ts, by=by, p=p, show=False)
if p >= 2:
acf = acf.values["value"]
else:
acf = [acf]
n = vdf[column].count()
name = (
"Ljung–Box Test Statistic" if not (box_pierce) else "Box-Pierce Test Statistic"
)
result = tablesample(
{"index": [], name: [], "p_value": [], "Serial Correlation": []}
)
Q = 0
for k in range(p):
div = n - k - 1 if not (box_pierce) else 1
mult = n * (n + 2) if not (box_pierce) else n
Q += mult * acf[k] ** 2 / div
pvalue = chi2.sf(Q, k + 1)
result.values["index"] += [k + 1]
result.values[name] += [Q]
result.values["p_value"] += [pvalue]
result.values["Serial Correlation"] += [True if pvalue < alpha else False]
return result
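# Illustrative usage (hypothetical column names): the statistic accumulates the
# first `p` sample autocorrelations of the column; one row is reported per lag.
#
#     ljungbox(vdf, column="eps", ts="date", p=12)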
# ---#
def mkt(vdf: vDataFrame, column: str, ts: str, alpha: float = 0.05):
"""
---------------------------------------------------------------------------
Mann Kendall test (Time Series trend).
\u26A0 Warning : This test is computationally expensive. It uses a CROSS
                 JOIN during the computation. The complexity is O(n * k),
                 where n is the total count of the vDataFrame and k is the
                 number of rows used in the test.
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
column: str
Input vcolumn to test.
ts: str
    vcolumn used as timeline. It will be used to order the data. It can be
    a numerical or a date-like (date, datetime, timestamp...) vcolumn.
alpha: float, optional
    Significance Level. Probability of rejecting H0 when it is true.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[
("ts", ts, [str],),
("column", column, [str],),
("alpha", alpha, [int, float],),
("vdf", vdf, [vDataFrame,],),
],
)
columns_check([column, ts], vdf)
column = vdf_columns_names([column], vdf)[0]
ts = vdf_columns_names([ts], vdf)[0]
table = "(SELECT {}, {} FROM {})".format(column, ts, vdf.__genSQL__())
query = "SELECT SUM(SIGN(y.{} - x.{})) FROM {} x CROSS JOIN {} y WHERE y.{} > x.{}".format(
column, column, table, table, ts, ts
)
vdf.__executeSQL__(query, title="Computes the Mann Kendall S.")
S = vdf._VERTICAPY_VARIABLES_["cursor"].fetchone()[0]
try:
S = float(S)
except:
S = None
n = vdf[column].count()
query = "SELECT SQRT(({} * ({} - 1) * (2 * {} + 5) - SUM(row * (row - 1) * (2 * row + 5))) / 18) FROM (SELECT row FROM (SELECT ROW_NUMBER() OVER (PARTITION BY {}) AS row FROM {}) VERTICAPY_SUBTABLE GROUP BY row) VERTICAPY_SUBTABLE".format(
n, n, n, column, vdf.__genSQL__()
)
vdf.__executeSQL__(query, title="Computes the Mann Kendall S standard deviation.")
STDS = vdf._VERTICAPY_VARIABLES_["cursor"].fetchone()[0]
try:
STDS = float(STDS)
except:
STDS = None
    if STDS in (None, 0) or S is None:
return None
if S > 0:
ZMK = (S - 1) / STDS
trend = "increasing"
elif S < 0:
ZMK = (S + 1) / STDS
trend = "decreasing"
else:
ZMK = 0
trend = "no trend"
pvalue = 2 * norm.sf(abs(ZMK))
    result = bool(pvalue < alpha)
if not (result):
trend = "no trend"
result = tablesample(
{
"index": [
"Mann Kendall Test Statistic",
"S",
"STDS",
"p_value",
"Monotonic Trend",
"Trend",
],
"value": [ZMK, S, STDS, pvalue, result, trend],
}
)
return result
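# Illustrative usage (hypothetical column names): S sums the signs of all
# pairwise differences ordered in time and ZMK is its normalized value.
#
#     mkt(vdf, column="y", ts="date", alpha=0.05)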
# ---#
def normaltest(vdf: vDataFrame, column: str):
"""
---------------------------------------------------------------------------
Test whether a sample differs from a normal distribution.
Parameters
----------
vdf: vDataFrame
input vDataFrame.
column: str
Input vcolumn to test.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
Z1, Z2 = skewtest(vdf, column)["value"][0], kurtosistest(vdf, column)["value"][0]
Z = Z1 ** 2 + Z2 ** 2
pvalue = chi2.sf(Z, 2)
result = tablesample({"index": ["Statistic", "p_value",], "value": [Z, pvalue],})
return result
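# Illustrative usage (hypothetical column name): this is the D'Agostino-Pearson
# omnibus statistic, combining the skewness and kurtosis Z statistics and
# comparing their sum of squares to a chi-square law with 2 degrees of freedom.
#
#     normaltest(vdf, column="x1")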
# ---#
def seasonal_decompose(
vdf: vDataFrame,
column: str,
ts: str,
by: list = [],
period: int = -1,
polynomial_order: int = 1,
estimate_seasonality: bool = True,
rule: Union[str, datetime.timedelta] = None,
mult: bool = False,
two_sided: bool = False,
):
"""
---------------------------------------------------------------------------
Performs a seasonal time series decomposition.
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
column: str
Input vcolumn to decompose.
ts: str
TS (Time Series) vcolumn to use to order the data. It can be of type date
or a numerical vcolumn.
by: list, optional
vcolumns used in the partition.
period: int, optional
Time Series period. It is used to retrieve the seasonality component.
if period <= 0, the seasonal component will be estimated using ACF. In
this case, polynomial_order must be greater than 0.
polynomial_order: int, optional
If greater than 0, the trend will be estimated using a polynomial of degree
'polynomial_order'. The parameter 'two_sided' will be ignored.
If equal to 0, the trend will be estimated using Moving Averages.
estimate_seasonality: bool, optional
If set to True, the seasonality will be estimated using cosine and sine
functions.
rule: str / time, optional
Interval to use to slice the time. For example, '5 minutes' will create records
separated by '5 minutes' time interval.
mult: bool, optional
If set to True, the decomposition type will be 'multiplicative'. Otherwise,
it is 'additive'.
two_sided: bool, optional
If set to True, a centered moving average is used for the trend isolation.
Otherwise only past values are used.
Returns
-------
vDataFrame
object containing (ts, column, TS seasonal part, TS trend, TS noise).
"""
if isinstance(by, str):
by = [by]
check_types(
[
("ts", ts, [str],),
("column", column, [str],),
("by", by, [list],),
("rule", rule, [str, datetime.timedelta,],),
("vdf", vdf, [vDataFrame,],),
("period", period, [int,],),
("mult", mult, [bool,],),
("two_sided", two_sided, [bool,],),
("polynomial_order", polynomial_order, [int,],),
("estimate_seasonality", estimate_seasonality, [bool,],),
],
)
assert period > 0 or polynomial_order > 0, ParameterError("Parameters 'polynomial_order' and 'period' can not be both null.")
columns_check([column, ts] + by, vdf)
ts, column, by = (
vdf_columns_names([ts], vdf)[0],
vdf_columns_names([column], vdf)[0],
vdf_columns_names(by, vdf),
)
if rule:
        vdf_tmp = vdf.asfreq(ts=ts, rule=rule, method={column: "linear"}, by=by)
else:
vdf_tmp = vdf[[ts, column]]
trend_name, seasonal_name, epsilon_name = (
"{}_trend".format(column[1:-1]),
"{}_seasonal".format(column[1:-1]),
"{}_epsilon".format(column[1:-1]),
)
by, by_tmp = "" if not (by) else "PARTITION BY " + ", ".join(vdf_columns_names(by, self)) + " ", by
if polynomial_order <= 0:
if two_sided:
if period == 1:
window = (-1, 1)
else:
if period % 2 == 0:
window = (-period / 2 + 1, period / 2)
else:
window = (int(-period / 2), int(period / 2))
else:
if period == 1:
window = (-2, 0)
else:
window = (-period + 1, 0)
vdf_tmp.rolling("avg", window, column, by_tmp, ts, trend_name)
else:
vdf_poly = vdf_tmp.copy()
X = []
for i in range(1, polynomial_order + 1):
vdf_poly[f"t_{i}"] = f"POWER(ROW_NUMBER() OVER ({by}ORDER BY {ts}), {i})"
X += [f"t_{i}"]
schema = vdf_poly._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema):
schema = vdf_poly._VERTICAPY_VARIABLES_["schema"]
if not (schema):
schema = "public"
from verticapy.learn.linear_model import LinearRegression
model = LinearRegression(name="{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(schema, get_session(vdf_poly._VERTICAPY_VARIABLES_["cursor"])),
cursor=vdf_poly._VERTICAPY_VARIABLES_["cursor"],
solver="bfgs",
max_iter=100,
tol=1e-6,)
model.drop()
model.fit(vdf_poly, X, column)
coefficients = model.coef_["coefficient"]
coefficients = [str(coefficients[0])] + [f"{coefficients[i]} * POWER(ROW_NUMBER() OVER({by}ORDER BY {ts}), {i})" if i != 1 else f"{coefficients[1]} * ROW_NUMBER() OVER({by}ORDER BY {ts})" for i in range(1, polynomial_order + 1)]
vdf_tmp[trend_name] = " + ".join(coefficients)
model.drop()
if mult:
vdf_tmp[seasonal_name] = f'{column} / NULLIFZERO("{trend_name}")'
else:
vdf_tmp[seasonal_name] = vdf_tmp[column] - vdf_tmp[trend_name]
if period <= 0:
acf = vdf_tmp.acf(column=seasonal_name, ts=ts, p=23, acf_type="heatmap", show=False)
period = int(acf["index"][1].split("_")[1])
if period == 1:
period = int(acf["index"][2].split("_")[1])
vdf_tmp["row_number_id"] = f"MOD(ROW_NUMBER() OVER ({by} ORDER BY {ts}), {period})"
if mult:
vdf_tmp[
seasonal_name
] = f"AVG({seasonal_name}) OVER (PARTITION BY row_number_id) / NULLIFZERO(AVG({seasonal_name}) OVER ())"
else:
vdf_tmp[
seasonal_name
] = f"AVG({seasonal_name}) OVER (PARTITION BY row_number_id) - AVG({seasonal_name}) OVER ()"
if estimate_seasonality:
vdf_seasonality = vdf_tmp.copy()
vdf_seasonality["t_cos"] = f"COS(2 * PI() * ROW_NUMBER() OVER ({by}ORDER BY {ts}) / {period})"
vdf_seasonality["t_sin"] = f"SIN(2 * PI() * ROW_NUMBER() OVER ({by}ORDER BY {ts}) / {period})"
X = ["t_cos", "t_sin",]
schema = vdf_seasonality._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema):
schema = vdf_seasonality._VERTICAPY_VARIABLES_["schema"]
if not (schema):
schema = "public"
from verticapy.learn.linear_model import LinearRegression
model = LinearRegression(name="{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(schema, get_session(vdf_seasonality._VERTICAPY_VARIABLES_["cursor"])),
cursor=vdf_seasonality._VERTICAPY_VARIABLES_["cursor"],
solver="bfgs",
max_iter=100,
tol=1e-6,)
model.drop()
model.fit(vdf_seasonality, X, seasonal_name)
coefficients = model.coef_["coefficient"]
vdf_tmp[seasonal_name] = f"{coefficients[0]} + {coefficients[1]} * COS(2 * PI() * ROW_NUMBER() OVER ({by}ORDER BY {ts}) / {period}) + {coefficients[2]} * SIN(2 * PI() * ROW_NUMBER() OVER ({by}ORDER BY {ts}) / {period})"
model.drop()
if mult:
vdf_tmp[
epsilon_name
] = f'{column} / NULLIFZERO("{trend_name}") / NULLIFZERO("{seasonal_name}")'
else:
vdf_tmp[epsilon_name] = (
vdf_tmp[column] - vdf_tmp[trend_name] - vdf_tmp[seasonal_name]
)
vdf_tmp["row_number_id"].drop()
return vdf_tmp
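# Illustrative usage (hypothetical column names): additive decomposition with a
# weekly period, estimating the trend with a degree-2 polynomial.
#
#     decomposition = seasonal_decompose(
#         vdf, column="sales", ts="date", period=7, polynomial_order=2,
#     )
#
# The returned vDataFrame holds the trend, seasonal, and epsilon vcolumns; with
# mult=True the model is column = trend * seasonal * epsilon instead of
# column = trend + seasonal + epsilon.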
# ---#
def skewtest(vdf: vDataFrame, column: str):
"""
---------------------------------------------------------------------------
Test whether the skewness is different from the normal distribution.
Parameters
----------
vdf: vDataFrame
input vDataFrame.
column: str
Input vcolumn to test.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types([("column", column, [str],), ("vdf", vdf, [vDataFrame,],),],)
columns_check([column], vdf)
column = vdf_columns_names([column], vdf)[0]
g1, n = vdf[column].agg(["skewness", "count"]).values[column]
mu1 = 0
mu2 = 6 * (n - 2) / ((n + 1) * (n + 3))
gamma1 = 0
gamma2 = (
36 * (n - 7) * (n ** 2 + 2 * n - 5) / ((n - 2) * (n + 5) * (n + 7) * (n + 9))
)
W2 = math.sqrt(2 * gamma2 + 4) - 1
delta = 1 / math.sqrt(math.log(math.sqrt(W2)))
alpha2 = 2 / (W2 - 1)
Z1 = delta * math.asinh(g1 / math.sqrt(alpha2 * mu2))
pvalue = 2 * norm.sf(abs(Z1))
result = tablesample({"index": ["Statistic", "p_value",], "value": [Z1, pvalue],})
return result
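# Illustrative usage (hypothetical column name): Z1 is a normal approximation
# of the sample skewness (D'Agostino's transformation).
#
#     skewtest(vdf, column="x1")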
# ---#
def variance_inflation_factor(
vdf: vDataFrame, X: list, X_idx: int = None,
):
"""
---------------------------------------------------------------------------
Computes the variance inflation factor (VIF). It can be used to detect
multicollinearity in an OLS Regression Analysis.
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
X: list
Input Variables.
X_idx: int
Index of the exogenous variable in X. If left to None, a tablesample will
be returned with all the variables VIF.
Returns
-------
float
VIF.
"""
check_types(
[
("X_idx", X_idx, [int],),
("X", X, [list],),
("vdf", vdf, [vDataFrame, str,],),
],
)
columns_check(X, vdf)
X = vdf_columns_names(X, vdf)
if isinstance(X_idx, str):
columns_check([X_idx], vdf)
for i in range(len(X)):
if str_column(X[i]) == str_column(X_idx):
X_idx = i
break
if isinstance(X_idx, (int, float)):
X_r = []
for i in range(len(X)):
if i != X_idx:
X_r += [X[i]]
y_r = X[X_idx]
from verticapy.learn.linear_model import LinearRegression
schema_writing = vdf._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema_writing):
schema_writing = "public"
name = schema_writing + ".VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
get_session(vdf._VERTICAPY_VARIABLES_["cursor"])
)
model = LinearRegression(name, cursor=vdf._VERTICAPY_VARIABLES_["cursor"])
try:
model.fit(vdf, X_r, y_r)
R2 = model.score("r2")
model.drop()
except:
try:
model.set_params({"solver": "bfgs"})
model.fit(vdf, X_r, y_r)
R2 = model.score("r2")
model.drop()
except:
model.drop()
raise
if 1 - R2 != 0:
return 1 / (1 - R2)
else:
return np.inf
    elif X_idx is None:
VIF = []
for i in range(len(X)):
VIF += [variance_inflation_factor(vdf, X, i)]
return tablesample({"X_idx": X, "VIF": VIF})
else:
raise ParameterError(
f"Wrong type for Parameter X_idx.\nExpected integer, found {type(X_idx)}."
)
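# Illustrative usage (hypothetical column names): each VIF is 1 / (1 - R2),
# where R2 comes from regressing one predictor on all the others.
#
#     variance_inflation_factor(vdf, X=["x1", "x2", "x3"])            # all VIFs
#     variance_inflation_factor(vdf, X=["x1", "x2", "x3"], X_idx=0)   # single VIF
#
# A common rule of thumb flags multicollinearity when a VIF exceeds 5-10.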
|
py | 1a521f5fbc2653923ab0719a74a6b64a88083257 | from math import sqrt
import unittest
from .solution import validSquare
class TestCase(unittest.TestCase):
def test_same_point(self):
p1 = [0,0]
p2 = [0,0]
p3 = [0,0]
p4 = [0,0]
self.assertFalse(validSquare(p1, p2, p3, p4))
def test_valid_square(self):
p1 = [0,0]
p2 = [1,1]
p3 = [1,0]
p4 = [0,1]
self.assertTrue(validSquare(p1, p2, p3, p4))
def test_rectangle(self):
p1 = [0,0]
p2 = [2,1]
p3 = [2,0]
p4 = [0,1]
self.assertFalse(validSquare(p1, p2, p3, p4))
def test_rotated_square(self):
p1 = [3 - sqrt(6), 3 + sqrt(2)]
p2 = [3 + sqrt(2), 3 + sqrt(6)]
p3 = [3 - sqrt(2), 3 - sqrt(6)]
p4 = [3 + sqrt(6), 3 - sqrt(2)]
self.assertTrue(validSquare(p1, p2, p3, p4)) |
py | 1a521f6fcda9de79e56574702b134dc731cacfbc | import argparse
import collections
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageEnhance
import albumentations as A
import albumentations.pytorch
from tqdm.notebook import tqdm
import cv2
import re
import time
from retinanet import model
# from retinanet import retina
from retinanet.dataloader import *
from retinanet.anchors import Anchors
# from scheduler import *
#Torch
import torch
import torch.nn as nn
from torch.utils.data import Dataset,DataLoader
from torch.utils.data.sampler import SequentialSampler, RandomSampler
from torch.optim import Adam, lr_scheduler
import torch.optim as optim
from pycocotools.cocoeval import COCOeval
import json
import torch
def main(args=None):
parser = argparse.ArgumentParser(description='Simple paps training script for training a RetinaNet network.')
    parser.add_argument('--batch_size', help='Batch size', type=int, default=0)
parser.add_argument('--test_data', help='test data file', default='data/test.npy')
parser.add_argument('--model_dir', help='pretrained model dir', default='trained_models/resnet50_640/model.pt')
    parser.add_argument('--threshold', help='score threshold for keeping detections', type=float, default=0.1)
parser = parser.parse_args(args)
    GPU_NUM = 0 # desired GPU number
device = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')
torch.cuda.set_device(device) # change allocation of current GPU
print('device', device)
retinanet = model.resnet50(num_classes=2, device=device)
retinanet = torch.nn.DataParallel(retinanet, device_ids = [GPU_NUM], output_device=GPU_NUM).to(device)
retinanet.load_state_dict(torch.load(parser.model_dir))
# retinanet.to(device)
dataset_val = PapsDataset('data/', set_name='val_2class',
transform=val_transforms)
val_data_loader = DataLoader(
dataset_val,
batch_size=1,
shuffle=False,
num_workers=4,
collate_fn=collate_fn
)
retinanet.eval()
start_time = time.time()
threshold = parser.threshold
results = []
GT_results = []
image_ids = []
cnt = 0
for index, data in enumerate(tqdm(val_data_loader)) :
if cnt > 100 :
break
cnt += 1
with torch.no_grad():
images, tbox, tlabel, targets = data
batch_size = len(images)
# print(tbox)
# print(len(tbox[0]))
c, h, w = images[0].shape
images = torch.cat(images).view(-1, c, h, w).to(device)
outputs = retinanet(images)
scores, labels, boxes = (outputs)
scores = scores.cpu()
labels = labels.cpu()
boxes = boxes.cpu()
if boxes.shape[0] > 0:
# change to (x, y, w, h) (MS COCO standard)
boxes[:, 2] -= boxes[:, 0]
boxes[:, 3] -= boxes[:, 1]
# print(boxes)
# compute predicted labels and scores
#for box, score, label in zip(boxes[0], scores[0], labels[0]):
for box_id in range(boxes.shape[0]):
score = float(scores[box_id])
label = int(labels[box_id])
box = boxes[box_id, :]
# scores are sorted, so we can break
if score < threshold:
break
# append detection for each positively labeled class
image_result = {
'image_id' : dataset_val.image_ids[index],
'category_id' : dataset_val.label_to_coco_label(label),
'score' : float(score),
'bbox' : box.tolist(),
}
# append detection to results
results.append(image_result)
if len(tbox[0]) > 0:
# compute predicted labels and scores
#for box, score, label in zip(boxes[0], scores[0], labels[0]):
for box_id in range(len(tbox[0])):
score = float(0.99)
label = (tlabel[0][box_id])
box = list(tbox[0][box_id])
box[2] -= box[0]
box[3] -= box[1]
# append detection for each positively labeled class
image_result = {
'image_id' : dataset_val.image_ids[index],
'category_id' : dataset_val.label_to_coco_label(label),
'score' : float(score),
'bbox' : list(box),
}
# append detection to results
GT_results.append(image_result)
# append image to list of processed images
image_ids.append(dataset_val.image_ids[index])
# print progress
print('{}/{}'.format(index, len(dataset_val)), end='\r')
if not len(results):
print('No object detected')
print('GT_results', len(GT_results))
print('pred_results', len(results))
# write output
json.dump(results, open('trained_models/eval/{}_bbox_results.json'.format(dataset_val.set_name), 'w'), indent=4)
# write GT
json.dump(GT_results, open('trained_models/eval/{}_GTbbox_results.json'.format(dataset_val.set_name), 'w'), indent=4)
print('validation time :', time.time() - start_time)
# load results in COCO evaluation tool
coco_true = dataset_val.coco
coco_pred = coco_true.loadRes('trained_models/eval/{}_bbox_results.json'.format(dataset_val.set_name))
coco_gt = coco_true.loadRes('trained_models/eval/{}_GTbbox_results.json'.format(dataset_val.set_name))
# run COCO evaluation
# coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
coco_eval = COCOeval(coco_gt, coco_pred, 'bbox')
coco_eval.params.imgIds = image_ids
# coco_eval.params.catIds = [0]
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
if __name__ == '__main__':
main()
|
py | 1a5220fcf78e5b9143e96d454d1d8fe84a96d2b2 | # Copyright 2019 Thai-Son Nguyen
# Licensed under the Apache License, Version 2.0 (the "License")
import random
import struct
import os
import numpy as np
import torch
from torch.nn.utils.rnn import pack_sequence
from . import smart_open
class ScpStreamReader(object):
def __init__(self, scp_path, label_path=None, time_idx_path=None, sek=True, downsample=1,
sort_src=False, pack_src=False, max_len=10000, max_utt=4096,
mean_sub=False, zero_pad=0,
spec_drop=False, spec_bar=2, time_stretch=False, time_win=10000,
sub_seq=0.25, ss_static=False, shuffle=False, fp16=False):
self.scp_path = scp_path # path to the .scp file
self.label_path = label_path # path to the label file
self.time_idx_path = time_idx_path
self.downsample = downsample
self.shuffle = shuffle
self.sort_src = sort_src
self.pack_src = pack_src
self.max_len = max_len
self.sek = sek
self.mean_sub = mean_sub
self.zero_pad = zero_pad
self.spec_drop = spec_drop
self.spec_bar = spec_bar
self.time_stretch = time_stretch
self.time_win = time_win
self.ts_scale = 1.
self.sub_seq = sub_seq
self.ss_static = ss_static
self.fp16 = fp16
self.scp_dir = ''
self.scp_file = None
self.scp_pos = 0
self.max_utt = max_utt
# store features for each data partition
self.feat = []
self.label = []
# store all label in a dictionary
self.label_dic = None
self.time_idx = None
self.end_reading = False
# read the feature matrix of the next utterance
def read_all_label(self):
self.label_dic = {}
label_file = smart_open(self.label_path, 'r')
for line in label_file:
tokens = line.split()
utt_id = tokens[0]
if utt_id == '' or utt_id is None: continue
self.label_dic[utt_id] = [int(token) for token in tokens[1:]]
def read_time_index(self):
self.time_idx = {}
idx_file = smart_open(self.time_idx_path, 'r')
for line in idx_file:
tokens = line.split()
utt_id = tokens[0]
tokens = [int(token) for token in tokens[1:]]
l = len(tokens) // 2
j = 0
sid, eid = [], []
for i in range(len(self.label_dic[utt_id])):
if j < l-1 and i >= tokens[j+1]: j += 1
#eid.append(10000 if j == l-1 else (tokens[l+j+1]//4+1))
eid.append(10000 if j == l-1 else (tokens[l+j+1]//4+8))
sid.append(tokens[l+j]//4)
self.time_idx[utt_id] = (sid, eid)
def _read_string(self, ark_file):
s = ''
while True:
c = ark_file.read(1).decode('utf-8')
if c == ' ' or c == '': return s
s += c
def _read_integer(self, ark_file):
n = ord(ark_file.read(1))
return struct.unpack('>i', ark_file.read(n)[::-1])[0]
def initialize(self):
if self.scp_file is None:
self.scp_file = [line.rstrip('\n') for line in smart_open(self.scp_path, 'r')]
path = os.path.dirname(self.scp_path)
self.scp_dir = path + '/' if path != '' else None
self.scp_pos = 0
if self.shuffle: random.shuffle(self.scp_file)
if self.label_path is not None and self.label_dic is None:
self.read_all_label()
print("Loaded labels of %d utterances" % len(self.label_dic))
if self.time_idx_path is not None and self.time_idx is None:
self.read_time_index()
self.utt_index = 0
if self.max_utt < 0 and len(self.feat) > 0:
self.utt_count = len(self.feat)
else:
self.utt_count = 0
self.end_reading = False
self.ts_scale = 1.
# read the feature matrix of the next utterance
def read_next_utt(self):
if self.scp_pos >= len(self.scp_file):
return '', None
line = self.scp_file[self.scp_pos]
utt_id, path_pos = line.replace('\n','').split(' ')
path, pos = path_pos.split(':')
if not path.startswith('/') and self.scp_dir is not None:
path = self.scp_dir + path
self.scp_pos += 1
ark_file = smart_open(path, 'rb')
ark_file.seek(int(pos))
header = ark_file.read(2).decode('utf-8')
if header != "\0B":
print("Input .ark file is not binary"); exit(1)
format = self._read_string(ark_file)
if format == "FM":
rows = self._read_integer(ark_file)
cols = self._read_integer(ark_file)
#print rows, cols
utt_mat = struct.unpack("<%df" % (rows * cols), ark_file.read(rows*cols*4))
utt_mat = np.array(utt_mat, dtype="float32")
if self.fp16:
utt_mat = utt_mat.astype("float16")
if self.zero_pad > 0:
rows += self.zero_pad
utt_mat.resize(rows*cols)
utt_mat = np.reshape(utt_mat, (rows, cols))
else:
print("Unsupported .ark file with %s format" % format); exit(1)
ark_file.close()
return utt_id, utt_mat
def read_batch_utt(self, batch_size=32):
feats = []
ids = []
i = 0
while i < batch_size:
utt_id, utt_mat = self.read_next_utt()
if utt_id is None or utt_id == '': break
feats.append(utt_mat)
ids.append(utt_id)
i += 1
if len(feats) == 0: return ([], [], [])
lst = sorted(zip(feats, ids), key=lambda e : -e[0].shape[0])
src, ids = zip(*lst)
src = self.augment_src(src)
src = self.collate_src(src)
return (*src, ids)
def read_utt_label(self, utt_id, utt_mat):
if not utt_id in self.label_dic:
#print('Labels not found for %s' % utt_id)
return utt_mat, None
if len(utt_mat) >= self.max_len:
return utt_mat, None
utt_lbl = self.label_dic[utt_id]
if self.sek and utt_lbl is not None:
utt_lbl = [1] + [el+2 for el in utt_lbl] + [2]
if self.time_idx is None:
tid = (None, None)
else:
tid = self.time_idx[utt_id]
utt_lbl = (utt_lbl, tid)
return utt_mat, utt_lbl
def next_partition(self):
if self.end_reading:
return 0
self.feat = []
self.label = []
while self.max_utt < 0 or len(self.feat) < self.max_utt:
utt_id, utt_mat = self.read_next_utt()
if utt_id == '': # No more utterances available
self.end_reading = True
break
utt_mat, utt_lbl = self.read_utt_label(utt_id, utt_mat)
if utt_lbl is None: continue
self.feat.append(utt_mat)
self.label.append(utt_lbl)
return len(self.feat)
def available(self):
if self.utt_index >= self.utt_count:
self.utt_count = self.next_partition()
self.utt_index = 0
return self.utt_index < self.utt_count
def timefreq_drop_inst(self, inst, num=2, time_drop=0.25, freq_drop=0.25):
time_num, freq_num = inst.shape
freq_num = freq_num
time_len = 72
max_time = int(time_drop*time_num)
for i in range(num):
n = min(max_time, random.randint(0, time_len))
t0 = random.randint(0, time_num-n)
inst[t0:t0+n, :] = 0
max_time -= n
n = random.randint(0, int(freq_drop*freq_num))
f0 = random.randint(0, freq_num-n)
inst[:, f0:f0+n] = 0
return inst
def time_stretch_inst(self, inst, low=0.8, high=1.25, win=10000):
time_len = inst.shape[0]
ids = None
for i in range((time_len // win) + 1):
s = random.uniform(low, high)
e = min(time_len, win*(i+1))
r = np.arange(win*i, e-1, s, dtype=np.float32)
r = np.round(r).astype(np.int32)
ids = r if ids is None else np.concatenate((ids, r))
self.ts_scale = s
return inst[ids]
def mean_sub_inst(self, inst):
return inst - inst.mean(axis=0, keepdims=True)
def down_sample_inst(self, feature, cf=4):
feature = feature[:(feature.shape[0]//cf)*cf,:]
return feature.reshape(feature.shape[0]//cf, feature.shape[1]*cf)
def augment_src(self, src):
insts = []
for inst in src:
inst = self.time_stretch_inst(inst, win=self.time_win) if self.time_stretch else inst
inst = self.mean_sub_inst(inst) if self.mean_sub else inst
inst = self.timefreq_drop_inst(inst, num=self.spec_bar) if self.spec_drop else inst
inst = self.down_sample_inst(inst, self.downsample) if self.downsample > 1 else inst
insts.append(inst)
return insts
def collate_src(self, insts):
max_len = max(inst.shape[0] for inst in insts)
inputs = np.zeros((len(insts), max_len, insts[0].shape[1]))
masks = torch.zeros((len(insts), max_len), dtype=torch.uint8)
for idx, inst in enumerate(insts):
inputs[idx, :inst.shape[0], :] = inst
masks[idx, :inst.shape[0]] = 1
inputs = torch.HalfTensor(inputs) if self.fp16 else torch.FloatTensor(inputs)
return inputs, masks
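    # Shape sketch for collate_src (assuming a batch of N utterances with
    # feature dimension F):
    #   inputs: (N, max_len, F) float tensor, zero-padded on the right
    #   masks:  (N, max_len) uint8 tensor, 1 over real frames and 0 over padding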
def collate_src_pack(self, insts):
max_len = max(inst.shape[0] for inst in insts)
masks = torch.zeros((len(insts), max_len), dtype=torch.uint8)
inputs = []
for idx, inst in enumerate(insts):
inputs.append(torch.HalfTensor(inst) if self.fp16 else torch.FloatTensor(inst))
masks[idx, 0:inst.shape[0]] = 1
inputs = pack_sequence(inputs)
return inputs, masks
def collate_tgt(self, insts):
tgt, tid = zip(*insts)
max_len = max(len(inst) for inst in tgt)
labels = np.array([inst + [0] * (max_len - len(inst)) for inst in tgt])
labels = torch.LongTensor(labels)
max_len -= 1
sid, eid = zip(*tid)
if None not in sid:
sid = np.array([inst + [0] * (max_len - len(inst)) for inst in sid])
sid = torch.LongTensor(sid * self.ts_scale + 0.5)
if None not in eid:
eid = np.array([inst + [10000] * (max_len - len(inst)) for inst in eid])
eid = torch.LongTensor(eid * self.ts_scale + 0.5)
return labels, sid, eid
def next_batch(self, batch_size=16):
src = self.feat[self.utt_index:self.utt_index+batch_size]
tgt = self.label[self.utt_index:self.utt_index+batch_size]
src = self.augment_src(src)
if self.sort_src or self.pack_src:
lst = sorted(zip(src, tgt), key=lambda e : -e[0].shape[0])
src, tgt = zip(*lst)
self.utt_index += len(src)
src = self.collate_src(src) if not self.pack_src else self.collate_src_pack(src)
tgt = self.collate_tgt(tgt)
return (*src, *tgt)
def next(self, batch_input=3000):
l = len(self.feat)
j = i = self.utt_index
max_l = 0
while j < l:
max_l = max(max_l, self.feat[j].shape[0])
if j > i and max_l*(j-i+1) > batch_input: break
j += 1
last = (j==l)
src, tgt = self.feat[self.utt_index:j], self.label[self.utt_index:j]
src = self.augment_src(src)
if self.sort_src or self.pack_src:
lst = sorted(zip(src, tgt), key=lambda e : -e[0].shape[0])
src, tgt = zip(*lst)
seqs = len(src)
self.utt_index += seqs
src = self.collate_src(src) if not self.pack_src else self.collate_src_pack(src)
tgt = self.collate_tgt(tgt)
return (*src, *tgt, seqs, last)
class ScpBatchReader(ScpStreamReader):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.shuffle = False
def next_partition(self):
if self.end_reading:
return 0
self.feat, self.label = [], []
feats, labels = [], []
num = 0
while num < self.max_utt:
if self.scp_pos >= len(self.scp_file):
self.end_reading = True; break
if self.scp_file[self.scp_pos].startswith('#'):
if len(feats) > 0:
self.feat.append(feats)
self.label.append(labels)
num += len(feats)
feats, labels = [], []
self.scp_pos += 1
continue
utt_id, utt_mat = self.read_next_utt()
if utt_id == '':
self.end_reading = True; break
utt_mat, utt_lbl = self.read_utt_label(utt_id, utt_mat)
if utt_lbl is None: continue
feats.append(utt_mat)
labels.append(utt_lbl)
return len(self.feat)
def next(self, batch_input=1):
src, tgt = self.feat[self.utt_index], self.label[self.utt_index]
src = self.augment_src(src)
if self.sort_src or self.pack_src:
lst = sorted(zip(src, tgt), key=lambda e : -e[0].shape[0])
src, tgt = zip(*lst)
seqs = len(src)
self.utt_index += 1
last = (self.utt_index == len(self.feat))
src = self.collate_src(src) if not self.pack_src else self.collate_src_pack(src)
tgt = self.collate_tgt(tgt)
return (*src, *tgt, seqs, last)
|
py | 1a5223df131d33c60cf65182bf7d57e147ca2f54 | from importlib import import_module
from os import path, listdir
from string import lower
from debug import logger
import paths
class MsgBase(object):
def encode(self):
self.data = {"": lower(type(self).__name__)}
def constructObject(data):
whitelist = ["message"]
if data[""] not in whitelist:
return None
try:
m = import_module("messagetypes." + data[""])
classBase = getattr(m, data[""].title())
except (NameError, ImportError):
logger.error("Don't know how to handle message type: \"%s\"", data[""], exc_info=True)
return None
try:
returnObj = classBase()
returnObj.decode(data)
except KeyError as e:
logger.error("Missing mandatory key %s", e)
return None
except:
logger.error("classBase fail", exc_info=True)
return None
else:
return returnObj
if paths.frozen is not None:
import messagetypes.message
import messagetypes.vote
else:
for mod in listdir(path.dirname(__file__)):
if mod == "__init__.py":
continue
splitted = path.splitext(mod)
if splitted[1] != ".py":
continue
try:
import_module("." + splitted[0], "messagetypes")
except ImportError:
logger.error("Error importing %s", mod, exc_info=True)
else:
logger.debug("Imported message type module %s", mod)
|
py | 1a522446c2ee75fc2c040a49a53f8453a819c5b8 | from ..Qt import QtGui, QtCore, isQObjectAlive
from ..GraphicsScene import GraphicsScene
from ..Point import Point
from .. import functions as fn
import weakref
import operator
from ..util.lru_cache import LRUCache
class GraphicsItem(object):
"""
**Bases:** :class:`object`
Abstract class providing useful methods to GraphicsObject and GraphicsWidget.
(This is required because we cannot have multiple inheritance with QObject subclasses.)
A note about Qt's GraphicsView framework:
    The GraphicsView system places a lot of emphasis on the notion that the
    graphics within the scene should be device independent--you should be able
    to take the same graphics and display them on screens of different
    resolutions, printers, export to SVG, etc. This is nice in principle, but
    causes me a lot of headache in practice. It means that I have to circumvent
    all the device-independent expectations any time I want to operate in pixel
    coordinates rather than arbitrary scene coordinates. A lot of the code in
    GraphicsItem is devoted to this task--keeping track of view widgets and
    device transforms, computing the size and shape of a pixel in local item
    coordinates, etc. Note that in item coordinates, a pixel does not have to be
    square or even rectangular, so just asking how to increase a bounding rect
    by 2px can be a rather complex task.
"""
_pixelVectorGlobalCache = LRUCache(100, 70)
_mapRectFromViewGlobalCache = LRUCache(100, 70)
def __init__(self, register=None):
if not hasattr(self, '_qtBaseClass'):
for b in self.__class__.__bases__:
if issubclass(b, QtGui.QGraphicsItem):
self.__class__._qtBaseClass = b
break
if not hasattr(self, '_qtBaseClass'):
raise Exception('Could not determine Qt base class for GraphicsItem: %s' % str(self))
self._pixelVectorCache = [None, None]
self._viewWidget = None
self._viewBox = None
self._connectedView = None
self._exportOpts = False ## If False, not currently exporting. Otherwise, contains dict of export options.
if register is not None and register:
warnings.warn(
"'register' argument is deprecated and does nothing",
DeprecationWarning, stacklevel=2
)
def getViewWidget(self):
"""
Return the view widget for this item.
If the scene has multiple views, only the first view is returned.
The return value is cached; clear the cached value with forgetViewWidget().
If the view has been deleted by Qt, return None.
"""
if self._viewWidget is None:
scene = self.scene()
if scene is None:
return None
views = scene.views()
if len(views) < 1:
return None
self._viewWidget = weakref.ref(self.scene().views()[0])
v = self._viewWidget()
if v is not None and not isQObjectAlive(v):
return None
return v
def forgetViewWidget(self):
self._viewWidget = None
def getViewBox(self):
"""
Return the first ViewBox or GraphicsView which bounds this item's visible space.
If this item is not contained within a ViewBox, then the GraphicsView is returned.
If the item is contained inside nested ViewBoxes, then the inner-most ViewBox is returned.
The result is cached; clear the cache with forgetViewBox()
"""
if self._viewBox is None:
p = self
while True:
try:
p = p.parentItem()
except RuntimeError: ## sometimes happens as items are being removed from a scene and collected.
return None
if p is None:
vb = self.getViewWidget()
if vb is None:
return None
else:
self._viewBox = weakref.ref(vb)
break
if hasattr(p, 'implements') and p.implements('ViewBox'):
self._viewBox = weakref.ref(p)
break
return self._viewBox() ## If we made it this far, _viewBox is definitely not None
def forgetViewBox(self):
self._viewBox = None
def deviceTransform(self, viewportTransform=None):
"""
Return the transform that converts local item coordinates to device coordinates (usually pixels).
Extends deviceTransform to automatically determine the viewportTransform.
"""
if self._exportOpts is not False and 'painter' in self._exportOpts: ## currently exporting; device transform may be different.
scaler = self._exportOpts.get('resolutionScale', 1.0)
return self.sceneTransform() * QtGui.QTransform(scaler, 0, 0, scaler, 1, 1)
if viewportTransform is None:
view = self.getViewWidget()
if view is None:
return None
viewportTransform = view.viewportTransform()
dt = self._qtBaseClass.deviceTransform(self, viewportTransform)
#xmag = abs(dt.m11())+abs(dt.m12())
#ymag = abs(dt.m21())+abs(dt.m22())
#if xmag * ymag == 0:
if dt.determinant() == 0: ## occurs when deviceTransform is invalid because widget has not been displayed
return None
else:
return dt
def viewTransform(self):
"""Return the transform that maps from local coordinates to the item's ViewBox coordinates
If there is no ViewBox, return the scene transform.
Returns None if the item does not have a view."""
view = self.getViewBox()
if view is None:
return None
if hasattr(view, 'implements') and view.implements('ViewBox'):
tr = self.itemTransform(view.innerSceneItem())
if isinstance(tr, tuple):
tr = tr[0] ## difference between pyside and pyqt
return tr
else:
return self.sceneTransform()
#return self.deviceTransform(view.viewportTransform())
def getBoundingParents(self):
"""Return a list of parents to this item that have child clipping enabled."""
p = self
parents = []
while True:
p = p.parentItem()
if p is None:
break
if p.flags() & self.ItemClipsChildrenToShape:
parents.append(p)
return parents
def viewRect(self):
"""Return the visible bounds of this item's ViewBox or GraphicsWidget,
in the local coordinate system of the item."""
view = self.getViewBox()
if view is None:
return None
bounds = self.mapRectFromView(view.viewRect())
if bounds is None:
return None
bounds = bounds.normalized()
## nah.
#for p in self.getBoundingParents():
#bounds &= self.mapRectFromScene(p.sceneBoundingRect())
return bounds
def pixelVectors(self, direction=None):
"""Return vectors in local coordinates representing the width and height of a view pixel.
If direction is specified, then return vectors parallel and orthogonal to it.
Return (None, None) if pixel size is not yet defined (usually because the item has not yet been displayed)
or if pixel size is below floating-point precision limit.
"""
## This is an expensive function that gets called very frequently.
## We have two levels of cache to try speeding things up.
dt = self.deviceTransform()
if dt is None:
return None, None
## Ignore translation. If the translation is much larger than the scale
## (such as when looking at unix timestamps), we can get floating-point errors.
dt.setMatrix(dt.m11(), dt.m12(), 0, dt.m21(), dt.m22(), 0, 0, 0, 1)
if direction is None:
direction = QtCore.QPointF(1, 0)
elif direction.manhattanLength() == 0:
raise Exception("Cannot compute pixel length for 0-length vector.")
key = (dt.m11(), dt.m21(), dt.m12(), dt.m22(), direction.x(), direction.y())
## check local cache
if key == self._pixelVectorCache[0]:
return tuple(map(Point, self._pixelVectorCache[1])) ## return a *copy*
## check global cache
pv = self._pixelVectorGlobalCache.get(key, None)
if pv is not None:
self._pixelVectorCache = [key, pv]
return tuple(map(Point,pv)) ## return a *copy*
## attempt to re-scale direction vector to fit within the precision of the coordinate system
## Here's the problem: we need to map the vector 'direction' from the item to the device, via transform 'dt'.
## In some extreme cases, this mapping can fail unless the length of 'direction' is cleverly chosen.
## Example:
## dt = [ 1, 0, 2
## 0, 2, 1e20
## 0, 0, 1 ]
## Then we map the origin (0,0) and direction (0,1) and get:
## o' = 2,1e20
## d' = 2,1e20 <-- should be 1e20+2, but this can't be represented with a 32-bit float
##
## |o' - d'| == 0 <-- this is the problem.
## Perhaps the easiest solution is to exclude the transformation column from dt. Does this cause any other problems?
#if direction.x() == 0:
#r = abs(dt.m32())/(abs(dt.m12()) + abs(dt.m22()))
##r = 1.0/(abs(dt.m12()) + abs(dt.m22()))
#elif direction.y() == 0:
#r = abs(dt.m31())/(abs(dt.m11()) + abs(dt.m21()))
##r = 1.0/(abs(dt.m11()) + abs(dt.m21()))
#else:
#r = ((abs(dt.m32())/(abs(dt.m12()) + abs(dt.m22()))) * (abs(dt.m31())/(abs(dt.m11()) + abs(dt.m21()))))**0.5
#if r == 0:
#r = 1. ## shouldn't need to do this; probably means the math above is wrong?
#directionr = direction * r
directionr = direction
## map direction vector onto device
#viewDir = Point(dt.map(directionr) - dt.map(Point(0,0)))
#mdirection = dt.map(directionr)
dirLine = QtCore.QLineF(QtCore.QPointF(0,0), directionr)
viewDir = dt.map(dirLine)
if viewDir.length() == 0:
return None, None ## pixel size cannot be represented on this scale
## get unit vector and orthogonal vector (length of pixel)
#orthoDir = Point(viewDir[1], -viewDir[0]) ## orthogonal to line in pixel-space
try:
normView = viewDir.unitVector()
#normView = viewDir.norm() ## direction of one pixel orthogonal to line
normOrtho = normView.normalVector()
#normOrtho = orthoDir.norm()
except:
raise Exception("Invalid direction %s" %directionr)
## map back to item
dti = fn.invertQTransform(dt)
#pv = Point(dti.map(normView)-dti.map(Point(0,0))), Point(dti.map(normOrtho)-dti.map(Point(0,0)))
pv = Point(dti.map(normView).p2()), Point(dti.map(normOrtho).p2())
self._pixelVectorCache[1] = pv
        self._pixelVectorCache[0] = key
self._pixelVectorGlobalCache[key] = pv
return self._pixelVectorCache[1]
def pixelLength(self, direction, ortho=False):
"""Return the length of one pixel in the direction indicated (in local coordinates)
If ortho=True, then return the length of one pixel orthogonal to the direction indicated.
Return None if pixel size is not yet defined (usually because the item has not yet been displayed).
"""
normV, orthoV = self.pixelVectors(direction)
        if normV is None or orthoV is None:
return None
if ortho:
return orthoV.length()
return normV.length()
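    # Hypothetical usage from inside an item subclass, e.g. to keep a marker at
    # a constant on-screen size regardless of zoom:
    #   w = self.pixelLength(Point(1, 0))              # local width of one pixel
    #   h = self.pixelLength(Point(1, 0), ortho=True)  # local height of one pixel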
def pixelSize(self):
## deprecated
v = self.pixelVectors()
if v == (None, None):
return None, None
return (v[0].x()**2+v[0].y()**2)**0.5, (v[1].x()**2+v[1].y()**2)**0.5
def pixelWidth(self):
## deprecated
vt = self.deviceTransform()
if vt is None:
return 0
vt = fn.invertQTransform(vt)
return vt.map(QtCore.QLineF(0, 0, 1, 0)).length()
def pixelHeight(self):
## deprecated
vt = self.deviceTransform()
if vt is None:
return 0
vt = fn.invertQTransform(vt)
return vt.map(QtCore.QLineF(0, 0, 0, 1)).length()
#return Point(vt.map(QtCore.QPointF(0, 1))-vt.map(QtCore.QPointF(0, 0))).length()
def mapToDevice(self, obj):
"""
Return *obj* mapped from local coordinates to device coordinates (pixels).
If there is no device mapping available, return None.
"""
vt = self.deviceTransform()
if vt is None:
return None
return vt.map(obj)
def mapFromDevice(self, obj):
"""
Return *obj* mapped from device coordinates (pixels) to local coordinates.
If there is no device mapping available, return None.
"""
vt = self.deviceTransform()
if vt is None:
return None
if isinstance(obj, QtCore.QPoint):
obj = QtCore.QPointF(obj)
vt = fn.invertQTransform(vt)
return vt.map(obj)
def mapRectToDevice(self, rect):
"""
Return *rect* mapped from local coordinates to device coordinates (pixels).
If there is no device mapping available, return None.
"""
vt = self.deviceTransform()
if vt is None:
return None
return vt.mapRect(rect)
def mapRectFromDevice(self, rect):
"""
Return *rect* mapped from device coordinates (pixels) to local coordinates.
If there is no device mapping available, return None.
"""
vt = self.deviceTransform()
if vt is None:
return None
vt = fn.invertQTransform(vt)
return vt.mapRect(rect)
def mapToView(self, obj):
vt = self.viewTransform()
if vt is None:
return None
return vt.map(obj)
def mapRectToView(self, obj):
vt = self.viewTransform()
if vt is None:
return None
return vt.mapRect(obj)
def mapFromView(self, obj):
vt = self.viewTransform()
if vt is None:
return None
vt = fn.invertQTransform(vt)
return vt.map(obj)
def mapRectFromView(self, obj):
vt = self.viewTransform()
if vt is None:
return None
cache = self._mapRectFromViewGlobalCache
k = (
vt.m11(), vt.m12(), vt.m13(),
vt.m21(), vt.m22(), vt.m23(),
vt.m31(), vt.m32(), vt.m33(),
)
try:
inv_vt = cache[k]
except KeyError:
inv_vt = fn.invertQTransform(vt)
cache[k] = inv_vt
return inv_vt.mapRect(obj)
def pos(self):
return Point(self._qtBaseClass.pos(self))
def viewPos(self):
return self.mapToView(self.mapFromParent(self.pos()))
def parentItem(self):
## PyQt bug -- some items are returned incorrectly.
return GraphicsScene.translateGraphicsItem(self._qtBaseClass.parentItem(self))
def setParentItem(self, parent):
## Workaround for Qt bug: https://bugreports.qt-project.org/browse/QTBUG-18616
if parent is not None:
pscene = parent.scene()
if pscene is not None and self.scene() is not pscene:
pscene.addItem(self)
return self._qtBaseClass.setParentItem(self, parent)
def childItems(self):
## PyQt bug -- some child items are returned incorrectly.
return list(map(GraphicsScene.translateGraphicsItem, self._qtBaseClass.childItems(self)))
def sceneTransform(self):
## Qt bug: do no allow access to sceneTransform() until
## the item has a scene.
if self.scene() is None:
return self.transform()
else:
return self._qtBaseClass.sceneTransform(self)
def transformAngle(self, relativeItem=None):
"""Return the rotation produced by this item's transform (this assumes there is no shear in the transform)
If relativeItem is given, then the angle is determined relative to that item.
"""
if relativeItem is None:
relativeItem = self.parentItem()
tr = self.itemTransform(relativeItem)
if isinstance(tr, tuple): ## difference between pyside and pyqt
tr = tr[0]
#vec = tr.map(Point(1,0)) - tr.map(Point(0,0))
vec = tr.map(QtCore.QLineF(0,0,1,0))
#return Point(vec).angle(Point(1,0))
return vec.angleTo(QtCore.QLineF(vec.p1(), vec.p1()+QtCore.QPointF(1,0)))
#def itemChange(self, change, value):
#ret = self._qtBaseClass.itemChange(self, change, value)
#if change == self.ItemParentHasChanged or change == self.ItemSceneHasChanged:
#print "Item scene changed:", self
#self.setChildScene(self) ## This is bizarre.
#return ret
#def setChildScene(self, ch):
#scene = self.scene()
#for ch2 in ch.childItems():
#if ch2.scene() is not scene:
#print "item", ch2, "has different scene:", ch2.scene(), scene
#scene.addItem(ch2)
#QtGui.QApplication.processEvents()
#print " --> ", ch2.scene()
#self.setChildScene(ch2)
def parentChanged(self):
"""Called when the item's parent has changed.
This method handles connecting / disconnecting from ViewBox signals
to make sure viewRangeChanged works properly. It should generally be
extended, not overridden."""
self._updateView()
def _updateView(self):
## called to see whether this item has a new view to connect to
## NOTE: This is called from GraphicsObject.itemChange or GraphicsWidget.itemChange.
if not hasattr(self, '_connectedView'):
# Happens when Python is shutting down.
return
## It is possible this item has moved to a different ViewBox or widget;
## clear out previously determined references to these.
self.forgetViewBox()
self.forgetViewWidget()
## check for this item's current viewbox or view widget
view = self.getViewBox()
#if view is None:
##print " no view"
#return
oldView = None
if self._connectedView is not None:
oldView = self._connectedView()
if view is oldView:
#print " already have view", view
return
## disconnect from previous view
if oldView is not None:
for signal, slot in [('sigRangeChanged', self.viewRangeChanged),
('sigDeviceRangeChanged', self.viewRangeChanged),
('sigTransformChanged', self.viewTransformChanged),
('sigDeviceTransformChanged', self.viewTransformChanged)]:
try:
getattr(oldView, signal).disconnect(slot)
except (TypeError, AttributeError, RuntimeError):
# TypeError and RuntimeError are from pyqt and pyside, respectively
pass
self._connectedView = None
## connect to new view
if view is not None:
#print "connect:", self, view
if hasattr(view, 'sigDeviceRangeChanged'):
# connect signals from GraphicsView
view.sigDeviceRangeChanged.connect(self.viewRangeChanged)
view.sigDeviceTransformChanged.connect(self.viewTransformChanged)
else:
# connect signals from ViewBox
view.sigRangeChanged.connect(self.viewRangeChanged)
view.sigTransformChanged.connect(self.viewTransformChanged)
self._connectedView = weakref.ref(view)
self.viewRangeChanged()
self.viewTransformChanged()
## inform children that their view might have changed
self._replaceView(oldView)
self.viewChanged(view, oldView)
def viewChanged(self, view, oldView):
"""Called when this item's view has changed
(ie, the item has been added to or removed from a ViewBox)"""
pass
def _replaceView(self, oldView, item=None):
if item is None:
item = self
for child in item.childItems():
if isinstance(child, GraphicsItem):
if child.getViewBox() is oldView:
child._updateView()
#self._replaceView(oldView, child)
else:
self._replaceView(oldView, child)
def viewRangeChanged(self):
"""
Called whenever the view coordinates of the ViewBox containing this item have changed.
"""
pass
def viewTransformChanged(self):
"""
Called whenever the transformation matrix of the view has changed.
(eg, the view range has changed or the view was resized)
"""
pass
#def prepareGeometryChange(self):
#self._qtBaseClass.prepareGeometryChange(self)
#self.informViewBoundsChanged()
def informViewBoundsChanged(self):
"""
Inform this item's container ViewBox that the bounds of this item have changed.
This is used by ViewBox to react if auto-range is enabled.
"""
view = self.getViewBox()
if view is not None and hasattr(view, 'implements') and view.implements('ViewBox'):
view.itemBoundsChanged(self) ## inform view so it can update its range if it wants
def childrenShape(self):
"""Return the union of the shapes of all descendants of this item in local coordinates."""
        childs = self.allChildItems()
        shapes = [self.mapFromItem(c, c.shape()) for c in childs]
return reduce(operator.add, shapes)
def allChildItems(self, root=None):
"""Return list of the entire item tree descending from this item."""
if root is None:
root = self
tree = []
for ch in root.childItems():
tree.append(ch)
tree.extend(self.allChildItems(ch))
return tree
def setExportMode(self, export, opts=None):
"""
This method is called by exporters to inform items that they are being drawn for export
        with a specific set of options. Items access these via self._exportOpts.
        When exporting is complete, _exportOpts is set to False.
"""
if opts is None:
opts = {}
if export:
self._exportOpts = opts
#if 'antialias' not in opts:
#self._exportOpts['antialias'] = True
else:
self._exportOpts = False
#def update(self):
#self._qtBaseClass.update(self)
#print "Update:", self
def getContextMenus(self, event):
return [self.getMenu()] if hasattr(self, "getMenu") else []
|
py | 1a5225ccc7e6a3fe21d8c4f20a73a17573fe76ca | import pytest
from thedarn.rules.gradle_wrapper import match, get_new_command
from thedarn.types import Command
@pytest.fixture(autouse=True)
def exists(mocker):
return mocker.patch('thedarn.rules.gradle_wrapper.os.path.isfile',
return_value=True)
@pytest.mark.parametrize('command', [
Command('gradle tasks', 'gradle: not found'),
Command('gradle build', 'gradle: not found')])
def test_match(mocker, command):
mocker.patch('thedarn.rules.gradle_wrapper.which', return_value=None)
assert match(command)
@pytest.mark.parametrize('command, gradlew, which', [
(Command('gradle tasks', 'gradle: not found'), False, None),
(Command('gradle tasks', 'command not found'), True, '/usr/bin/gradle'),
(Command('npm tasks', 'npm: not found'), True, None)])
def test_not_match(mocker, exists, command, gradlew, which):
mocker.patch('thedarn.rules.gradle_wrapper.which', return_value=which)
exists.return_value = gradlew
assert not match(command)
@pytest.mark.parametrize('script, result', [
('gradle assemble', './gradlew assemble'),
('gradle --help', './gradlew --help'),
('gradle build -c', './gradlew build -c')])
def test_get_new_command(script, result):
command = Command(script, '')
assert get_new_command(command) == result
|
py | 1a5226c1eb3f026def93cbcf0f99ed6a7257045f | #!/usr/bin/env python2
# coding=utf-8
# ^^^^^^^^^^^^ TODO remove when supporting only Python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest (BitcoinTestFramework):
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
target_fee = fee_per_byte * tx_size
if fee < target_fee:
raise AssertionError("Fee of %s VEDA too low! (Should be %s VEDA)"%(str(fee), str(target_fee)))
# allow the node's estimation to be at most 2 bytes off
if fee > fee_per_byte * (tx_size + 2):
raise AssertionError("Fee of %s VEDA too high! (Should be %s VEDA)"%(str(fee), str(target_fee)))
return curr_balance
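    # Worked example of the range enforced above (tx_size is hypothetical): with
    # fee_per_byte = 0.00000001 (as set later in run_test) and tx_size = 200 bytes,
    # target_fee = 0.000002 VEDA, so any fee between 0.000002 and 0.00000202 (i.e. up
    # to two extra bytes of slack) passes; anything outside raises AssertionError.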
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test (self):
# Check that there's no UTXO on none of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
print "Mining blocks..."
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 500)
assert_equal(walletinfo['balance'], 0)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 500)
assert_equal(self.nodes[1].getbalance(), 500)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
assert_equal(len(self.nodes[0].listunspent()), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
# Send 210 VEDA from 0 to 2 using sendtoaddress call.
# Second transaction will be child of first, and will require a fee
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 110)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 100)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all()
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises(JSONRPCException, self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 200)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all()
# node0 should end up with 1000 VEDA in block rewards plus fees, but
# minus the 210 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 1000-210)
assert_equal(self.nodes[2].getbalance(), 210)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 1000)
assert_equal(self.nodes[2].getbalance("from1"), 1000-210)
# Send 100 VEDA normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.00001') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 100, "", "", False)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('900'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), Decimal('100'))
# Send 100 VEDA with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 100, "", "", True)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('100')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('200'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Sendmany 100 VEDA
txid = self.nodes[2].sendmany('from1', {address: 100}, 0, False, "", [])
self.nodes[2].generate(1)
self.sync_all()
node_0_bal += Decimal('100')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 100 VEDA with subtract fee from amount
txid = self.nodes[2].sendmany('from1', {address: 100}, 0, False, "", [address])
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('100')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes)
self.nodes.append(start_node(3, self.options.tmpdir))
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
relayed = self.nodes[0].resendwallettransactions()
assert_equal(set(relayed), {txid1, txid2})
sync_mempools(self.nodes)
assert(txid1 in self.nodes[3].getrawmempool())
# Exercise balance rpcs
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
#check if we can list zero value tx as available coins
#1. create rawtx
#2. hex-changed one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 499.998, self.nodes[0].getnewaddress(): 11.11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
self.sync_all()
self.nodes[1].generate(1) #mine a block
self.sync_all()
unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
found = False
for uTx in unspentTxs:
if uTx['txid'] == zeroValueTxid:
found = True
assert_equal(uTx['amount'], Decimal('0'))
assert(found)
#do some -walletbroadcast tests
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all()
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
self.nodes[1].generate(1) #mine a block, tx should not be in there
self.sync_all()
assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted
#now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
self.nodes[1].generate(1)
self.sync_all()
node_2_bal += 2
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#create another tx
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
#restart the nodes with -walletbroadcast=1
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
sync_blocks(self.nodes)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
node_2_bal += 2
        #tx should be added to balance because after restarting the nodes tx should be broadcast
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#send a tx with value in a string (PR#6380 +)
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-2'))
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
#check if JSON parser can handle scientific notation in strings
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
try:
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
except JSONRPCException as e:
assert("Invalid amount" in e.error['message'])
else:
raise AssertionError("Must not parse invalid amounts")
try:
self.nodes[0].generate("2")
raise AssertionError("Must not accept strings as numeric")
except JSONRPCException as e:
assert("not an integer" in e.error['message'])
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all()
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
        #check if wallet or blockchain maintenance changes the balance
self.sync_all()
blocks = self.nodes[0].generate(2)
self.sync_all()
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
# Check modes:
# - True: unicode escaped as \u....
# - False: unicode directly as UTF-8
for mode in [True, False]:
self.nodes[0].ensure_ascii = mode
# unicode check: Basic Multilingual Plane, Supplementary Plane respectively
for s in [u'????????', u'????']:
addr = self.nodes[0].getaccountaddress(s)
label = self.nodes[0].getaccount(addr)
assert_equal(label.encode('utf-8'), s.encode('utf-8')) # TODO remove encode(...) when supporting only Python3
assert(s in self.nodes[0].listaccounts().keys())
self.nodes[0].ensure_ascii = True # restore to default
# maintenance tests
maintenance = [
'-rescan',
'-reindex',
'-zapwallettxes=1',
'-zapwallettxes=2',
'-salvagewallet',
]
for m in maintenance:
print "check " + m
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir, [[m]] * 3)
while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
# reindex will leave rpc warm up "early"; Wait for it to finish
time.sleep(0.1)
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
if __name__ == '__main__':
WalletTest ().main ()
|
py | 1a52285316a74c986a7f3a7d18ed74c3af8e696e | import pygame
aktualnie_wyswietlane = [[0 for col in range(14)] for row in range(5)]
aktualna_podpowiedz = [[0 for col_ in range(14)] for row_ in range(5)]
class Kod(object):
def main(self, aktualny_kolor):
Kod.reset(self)
self.pos = pygame.mouse.get_pos()
if self.stop == 0:
if aktualnie_wyswietlane[1][self.wiersz] and aktualnie_wyswietlane[2][self.wiersz] and aktualnie_wyswietlane[3][self.wiersz] and aktualnie_wyswietlane[4][self.wiersz]:
enter = pygame.key.get_pressed()
if enter[13]:
self.enter += 1
Kod.wyswietlanie_pytajnikow(self)
Kod.wyswietlanie_kulek(self)
if aktualny_kolor == 6:
x_1, x_2 = 77, 111
if aktualny_kolor == 7:
x_1, x_2 = 57, 89
if aktualny_kolor == 8:
x_1, x_2 = 37, 70
x_pole_1, x_pole_2 = 156, 190
if self.click[0]:
pozycja = pygame.mouse.get_pos()
else:
pozycja = (-1, -1)
for pole in range(1, 5):
if pozycja[0] in range(x_pole_1, x_pole_2) and pozycja[1] in range(self.y_1, self.y_1 + 35):
self.click_pole = pole
x_pole_1 += 58
x_pole_2 += 58
# print(self.click_pole)
for kulka in range(1, aktualny_kolor + 1):
if pozycja[0] in range(x_1, x_2) and pozycja[1] in range(665, 700):
aktualnie_wyswietlane[self.click_pole][self.wiersz] = kulka
x_1 += 46
x_2 += 46
def kod_gry(self, aktualny_kolor):
self.kod_los = self.twoj_kod
dodatkowe_2 = 0
sprawdzenie = [0, 0, 0, 0]
for wiersz in range(0, self.click_runda * 2 + 6):
index = 1
tmp = [0, 0, 0, 0]
self.podpowiedz = []
if self.enter == wiersz:
self.podpowiedz.append(0)
if aktualnie_wyswietlane[1][wiersz + 1] == self.kod_los[0]:
self.podpowiedz.append(1)
index += 1
tmp.append(self.kod_los[0])
if aktualnie_wyswietlane[2][wiersz + 1] == self.kod_los[1]:
self.podpowiedz.append(1)
index += 1
tmp.append(self.kod_los[1])
if aktualnie_wyswietlane[3][wiersz + 1] == self.kod_los[2]:
self.podpowiedz.append(1)
index += 1
tmp.append(self.kod_los[2])
if aktualnie_wyswietlane[4][wiersz + 1] == self.kod_los[3]:
self.podpowiedz.append(1)
index += 1
tmp.append(self.kod_los[3])
self.kod_los_blad = self.kod_los.copy()
for kol_2 in range(1, 5):
sprawdzenie[kol_2-1] = aktualnie_wyswietlane[kol_2][wiersz + 1]
for kol in range(1, 5):
if sprawdzenie.count(aktualnie_wyswietlane[kol][wiersz + 1]) > tmp.count(aktualnie_wyswietlane[kol][wiersz + 1]):
dodatkowe_2 = self.kod_los_blad.count(aktualnie_wyswietlane[kol][wiersz + 1]) - tmp.count(aktualnie_wyswietlane[kol][wiersz + 1])
sprawdzenie.remove(aktualnie_wyswietlane[kol][wiersz + 1])
if dodatkowe_2 or (aktualnie_wyswietlane[kol][wiersz + 1] in self.kod_los_blad and not aktualnie_wyswietlane[kol][wiersz + 1] in tmp):
self.podpowiedz.append(2)
if self.kod_los_blad.count(aktualnie_wyswietlane[kol][wiersz + 1]):
self.kod_los_blad.remove(aktualnie_wyswietlane[kol][wiersz + 1])
dodatkowe_2 = 0
#print("podp=",self.podpowiedz, "tmp=",tmp, "sprawdz=",sprawdzenie, "blad=",self.kod_los_blad)
while index <= 5:
self.podpowiedz.append(0)
index += 1
for kolumna in range(1, 5):
if wiersz == self.enter and self.podpowiedz[kolumna] == 0:
aktualna_podpowiedz[kolumna][wiersz + 1] = 0
if self.podpowiedz[kolumna] == 1:
aktualna_podpowiedz[kolumna][wiersz + 1] = 1
if self.podpowiedz[kolumna] == 2:
aktualna_podpowiedz[kolumna][wiersz + 1] = 2
Kod.wyswietlanie_podpowiedzi(self)
Kod.czy_wygrana(self)
def wyswietlanie_kulek(self):
czerwona = pygame.image.load("Obrazy/kulki/czerwona.png")
zielona = pygame.image.load("Obrazy/kulki/zielona.png")
niebieska = pygame.image.load("Obrazy/kulki/niebieska.png")
blekitna = pygame.image.load("Obrazy/kulki/blekitna.png")
rozowa = pygame.image.load("Obrazy/kulki/rozowa.png")
zolta = pygame.image.load("Obrazy/kulki/zolta.png")
szara = pygame.image.load("Obrazy/kulki/szara.png")
czarna = pygame.image.load("Obrazy/kulki/czarna.png")
self.y = 571
for wysokosc in range(1, 14):
x = 156
for xz in range(1, 5):
if aktualnie_wyswietlane[xz][wysokosc] == 1:
self.screen.blit(czerwona, (x, self.y))
elif aktualnie_wyswietlane[xz][wysokosc] == 2:
self.screen.blit(zielona, (x, self.y))
elif aktualnie_wyswietlane[xz][wysokosc] == 3:
self.screen.blit(niebieska, (x, self.y))
elif aktualnie_wyswietlane[xz][wysokosc] == 4:
self.screen.blit(blekitna, (x, self.y))
elif aktualnie_wyswietlane[xz][wysokosc] == 5:
self.screen.blit(rozowa, (x, self.y))
elif aktualnie_wyswietlane[xz][wysokosc] == 6:
self.screen.blit(zolta, (x, self.y))
elif aktualnie_wyswietlane[xz][wysokosc] == 7:
self.screen.blit(szara, (x, self.y))
elif aktualnie_wyswietlane[xz][wysokosc] == 8:
self.screen.blit(czarna, (x, self.y))
x += 58
self.y -= 50
def wyswietlanie_pytajnikow(self):
pytajnik = pygame.image.load("Obrazy/kulki/pytajnik2.png")
for wiersz_2 in range(1, self.click_runda * 2 + 6):
if self.enter == wiersz_2:
self.y_1 = 571 - 50 * wiersz_2
                self.wiersz = wiersz_2 + 1 # self.wiersz - which row comes after the enter press
self.screen.blit(pytajnik, (156, 571 - 50 * wiersz_2))
self.screen.blit(pytajnik, (214, 571 - 50 * wiersz_2))
self.screen.blit(pytajnik, (272, 571 - 50 * wiersz_2))
self.screen.blit(pytajnik, (330, 571 - 50 * wiersz_2))
elif self.enter == 0:
self.screen.blit(pytajnik, (156, 571))
self.screen.blit(pytajnik, (214, 571))
self.screen.blit(pytajnik, (272, 571))
self.screen.blit(pytajnik, (330, 571))
def wyswietlanie_podpowiedzi(self):
mala_czarna = pygame.image.load("Obrazy/kulki/mala_czarna.png")
mala_biala = pygame.image.load("Obrazy/kulki/mala_biala.png")
for wysokos_2 in range(1, 14):
if self.enter + 1 == wysokos_2:
continue
if aktualna_podpowiedz[1][wysokos_2] == 1:
self.screen.blit(mala_czarna, (37, 623 - 50 * wysokos_2))
elif aktualna_podpowiedz[1][wysokos_2] == 2:
self.screen.blit(mala_biala, (37, 623 - 50 * wysokos_2))
if aktualna_podpowiedz[2][wysokos_2] == 1:
self.screen.blit(mala_czarna, (61, 623 - 50 * wysokos_2))
elif aktualna_podpowiedz[2][wysokos_2] == 2:
self.screen.blit(mala_biala, (61, 623 - 50 * wysokos_2))
if aktualna_podpowiedz[3][wysokos_2] == 1:
self.screen.blit(mala_czarna, (37, 647 - 50 * wysokos_2))
elif aktualna_podpowiedz[3][wysokos_2] == 2:
self.screen.blit(mala_biala, (37, 647 - 50 * wysokos_2))
if aktualna_podpowiedz[4][wysokos_2] == 1:
self.screen.blit(mala_czarna, (61, 647 - 50 * wysokos_2))
elif aktualna_podpowiedz[4][wysokos_2] == 2:
self.screen.blit(mala_biala, (61, 647 - 50 * wysokos_2))
def czy_wygrana(self):
wygrana_w_ostatiej = 0
for wiersz in range(1, 14):
if aktualna_podpowiedz[1][wiersz] == 1 and aktualna_podpowiedz[2][wiersz] == 1 :
if aktualna_podpowiedz[3][wiersz] == 1 and aktualna_podpowiedz[4][wiersz] == 1:
if self.enter + 1 == wiersz:
continue
wygrana_w_ostatiej = 1
wygrana = pygame.image.load("Obrazy/wygrana.png")
for q in range(1,5):
for p in range(0, self.click_runda * 2 + 7):
aktualnie_wyswietlane[q][p] = self.kod_los[q - 1]
self.screen.blit(wygrana, (0, 300))
self.stop = 1
if self.wstecz == 1:
self.stop = 0
self.aktualnie_wyswietlane = 1
self.mozna_grac = 0
self.twoj_kod = [0, 0, 0, 0]
self.reset = 1
Kod.reset(self)
if self.enter == self.click_runda * 2 + 6 and wygrana_w_ostatiej == 0:
przegrana = pygame.image.load("Obrazy/przegrana.png")
self.screen.blit(przegrana, (0, 300))
for q in range(1, 5):
for p in range(0, self.click_runda * 2 + 7):
aktualnie_wyswietlane[q][p] = self.kod_los[q - 1]
self.stop = 1
if self.wstecz == 1:
self.stop = 0
self.aktualnie_wyswietlane = 1
self.reset = 1
Kod.reset(self)
def reset(self):
if self.reset == 1:
for iksy in range(1, 5):
for igreki in range(1, 14):
aktualnie_wyswietlane[iksy][igreki] = 0
aktualna_podpowiedz[iksy][igreki] = 0
self.click_vs = 0
self.click_runda = 0
self.aktualny_kolor = 0
self.aktualny_vs = 0
self.click_kolor = 0
self.wstecz = 0
self.click_miejsce = 0
self.click = pygame.mouse.get_pressed()
self.click_pole = 0
self.enter = 0
self.y_1 = 571
self.yz = 1
self.wiersz = 1
self.kod_los = [0, 0, 0, 0]
self.reset = 0
self.mozna_grac = 0
self.twoj_kod = [0, 0, 0, 0]
self.mozna_grac = 0
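# The scoring in Kod.kod_gry above is interleaved with pygame state, so as a point of
# comparison only, here is a minimal self-contained sketch of classic Mastermind
# scoring (exact-position "black" pegs and colour-only "white" pegs). It is not called
# by the game; the name and arguments are illustrative, with secret and guess being
# equal-length sequences of colour codes.
def referencyjna_podpowiedz(secret, guess):
    # black pegs: the same colour in the same position
    black = sum(s == g for s, g in zip(secret, guess))
    # white pegs: colours shared regardless of position, minus the exact matches
    white = sum(min(secret.count(c), guess.count(c)) for c in set(guess)) - black
    return black, white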
class Ustawianie_Kodu(object):
def main(self, ilosc_kolorow):
ustawiony_kod = pygame.image.load("Obrazy/ukladanie_kodu.png")
self.screen.blit(ustawiony_kod, (0,0))
if ilosc_kolorow == 6:
Ustawianie_Kodu.kulki_szesc(self)
x_1, x_2 = 77, 111
elif ilosc_kolorow == 7:
Ustawianie_Kodu.kulki_siedem(self)
x_1, x_2 = 57, 89
elif ilosc_kolorow == 8:
Ustawianie_Kodu.kulki_osiem(self)
x_1, x_2 = 37, 70
x_pole_1, x_pole_2 = 122, 155
if self.click[0]:
pozycja = pygame.mouse.get_pos()
else:
pozycja = (-1, -1)
        # Clicking on the "your code" slots
for pole in range(0, 4):
if pozycja[0] in range(x_pole_1, x_pole_2) and pozycja[1] in range(546, 580):
self.click_pole = pole
x_pole_1 += 49
x_pole_2 += 49
        # clicking on the list of colours
for kulka in range(1, ilosc_kolorow + 1):
if pozycja[0] in range(x_1, x_2) and pozycja[1] in range(665, 700):
self.twoj_kod[self.click_pole] = kulka
x_1 += 46
x_2 += 46
czerwona = pygame.image.load("Obrazy/kulki/czerwona.png")
zielona = pygame.image.load("Obrazy/kulki/zielona.png")
niebieska = pygame.image.load("Obrazy/kulki/niebieska.png")
blekitna = pygame.image.load("Obrazy/kulki/blekitna.png")
rozowa = pygame.image.load("Obrazy/kulki/rozowa.png")
zolta = pygame.image.load("Obrazy/kulki/zolta.png")
szara = pygame.image.load("Obrazy/kulki/szara.png")
czarna = pygame.image.load("Obrazy/kulki/czarna.png")
x = 122
for numer in range(4):
if self.twoj_kod[numer] == 1:
self.screen.blit(czerwona, (x, 546))
if self.twoj_kod[numer] == 2:
self.screen.blit(zielona, (x, 546))
if self.twoj_kod[numer] == 3:
self.screen.blit(niebieska, (x, 546))
if self.twoj_kod[numer] == 4:
self.screen.blit(blekitna, (x, 546))
if self.twoj_kod[numer] == 5:
self.screen.blit(rozowa, (x, 546))
if self.twoj_kod[numer] == 6:
self.screen.blit(zolta, (x, 546))
if self.twoj_kod[numer] == 7:
self.screen.blit(szara, (x, 546))
if self.twoj_kod[numer] == 8:
self.screen.blit(czarna, (x, 546))
x += 49
if self.stop == 0:
if self.twoj_kod[0] and self.twoj_kod[1] and self.twoj_kod[2] and self.twoj_kod[3]:
enter = pygame.key.get_pressed()
if enter[13]:
self.mozna_grac = 1
if self.wstecz == 1:
wyjscie = pygame.image.load("Obrazy/wyjsc.png")
self.screen.blit(wyjscie, (0, 300))
self.stop = 1
def kulki_szesc(self):
k_6 = pygame.image.load("Obrazy/kulki_6.png")
self.screen.blit(k_6, (62, 650))
def kulki_siedem(self):
k_7 = pygame.image.load("Obrazy/kulki_7.png")
self.screen.blit(k_7, (40, 650))
def kulki_osiem(self):
k_8 = pygame.image.load("Obrazy/kulki_8.png")
self.screen.blit(k_8, (21, 650)) |
py | 1a5228e38a7b46b4e2cbfa8b4876e09acbdeb7c1 | #! /usr/bin/python
'''
Bookmarks.py - Main Executable
- Identifies current BBC programmes and generates keywords based on them.
- Collects Twitter streaming API data based on generated keywords.
- Analyses the collected data to identify frequent words, hence allowing the web interface to generate bookmarks.
'''
### Danger area: Adding OAuth to both Twitter components will result in them both trying to renew the received key and secret
### To avoid this, there needs to be a way to pass received keys and secrets to components needing them before they try to make requests too.
### Also need to farm out access to config file from OAuth utilising components so they're more generic
# This program requires a config based on the included twitter-login.conf.dist saving to /home/<yourusername>/twitter-login.conf
# During the running of the program, it will create a file called tempRDF.txt in the running directory
# It will also create files called namecache.conf, linkcache.conf and oversizedtweets.conf in your home directory
# See the README for more information
# Before we do anything.
# First check to see if we're suppose to be even running. If we're not, don't start!
import os
import sys
from Kamaelia.Apps.SocialBookmarks.Print import Print
# Before we do anything.
# First check to see if we're suppose to be even running. If we're not, don't start!
if os.path.exists(os.path.join(os.path.expanduser("~"), "stop_bookmarks")):
Print("Exitting bookmarks because ~/stop_bookmarks exists")
start = False
sys.exit(0)
else:
start = True
# import Axon
# Axon.Box.ShowAllTransits = True
if start and (__name__ == "__main__"):
import cjson
from Kamaelia.Apps.SocialBookmarks.BBCProgrammes import WhatsOn
from Kamaelia.Apps.SocialBookmarks.DataCollector import DataCollector, RawDataCollector
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Util.TwoWaySplitter import TwoWaySplitter
from Kamaelia.Apps.SocialBookmarks.LiveAnalysis import FinalAnalysisNLTK, LiveAnalysis, LiveAnalysisNLTK
from Kamaelia.Apps.SocialBookmarks.Requester import Requester
from Kamaelia.Apps.SocialBookmarks.TweetFixer import LinkResolver, RetweetCorrector, RetweetFixer, TweetCleaner
from Kamaelia.Apps.SocialBookmarks.TwitterSearch import PeopleSearch
from Kamaelia.Apps.SocialBookmarks.TwitterStream import TwitterStream
from Kamaelia.Apps.SocialBookmarks.URLGetter import HTTPGetter
from Kamaelia.Apps.SocialBookmarks.StopOnFile import StopOnFile
if 0:
from Kamaelia.Apps.MH.Profiling import FormattedProfiler
from Kamaelia.Util.Console import ConsoleEchoer
from Kamaelia.Util.Pipeline import Pipeline
Pipeline( FormattedProfiler(10.0,1.0), ConsoleEchoer(), ).activate()
StopOnFile(stopfile=os.path.join(os.path.expanduser("~"), "stop_bookmarks")).activate()
# Load Config
try:
homedir = os.path.expanduser("~")
file = open(homedir + "/twitter-login.conf")
except IOError, e:
print ("Failed to load login data - exiting")
sys.exit(0)
raw_config = file.read()
file.close()
# Read Config
config = cjson.decode(raw_config)
username = config['username']
password = config['password']
dbuser = config['dbuser']
dbpass = config['dbpass']
bitlyusername = config['bitlyusername']
bitlyapikey = config['bitlyapikey']
# This is a bit of a nasty hack, and really all this stuff should be picked up from a config system
# Unfortunately no such config system exists, so we'll have to work through this way for the moment
import Kamaelia.Apps.SocialBookmarks.DBWrapper
Kamaelia.Apps.SocialBookmarks.DBWrapper.DBWrapper.dbuser = dbuser
Kamaelia.Apps.SocialBookmarks.DBWrapper.DBWrapper.dbpass = dbpass
Kamaelia.Apps.SocialBookmarks.DBWrapper.DBWrapper.maindb = "twitter_bookmarks"
Kamaelia.Apps.SocialBookmarks.DBWrapper.DBWrapper.nextdb = "twitter_bookmarks_next"
# Set proxy server if available
if config.has_key('proxy'):
proxy = config['proxy']
else:
proxy = False
# Set OAuth consumer keypair
consumerkeypair = [config['consumerkey'],config['consumersecret']]
# Set OAuth secret keypair if available - if not it will be sourced from Twitter
if config.has_key('key') and config.has_key('secret'):
keypair = [config['key'],config['secret']]
else:
keypair = False
# Linker component for LiveAnalysis
LINKER = Graphline(LINKRESOLVE = LinkResolver(bitlyusername,bitlyapikey),
LINKREQUESTER = HTTPGetter(proxy, "BBC R&D Grabber", 10),
linkages = {("self", "inbox") : ("LINKRESOLVE", "inbox"),
("LINKRESOLVE", "outbox") : ("self", "outbox"),
("LINKRESOLVE", "urlrequests") : ("LINKREQUESTER", "inbox"),
("LINKREQUESTER", "outbox") : ("LINKRESOLVE", "responses")}).activate()
# Linker component for FinalAnalysis
# This duplication could probably be avoided by doing some tagging/filtering TODO
LINKERFINAL = Graphline(LINKRESOLVE = LinkResolver(bitlyusername,bitlyapikey),
LINKREQUESTER = HTTPGetter(proxy, "BBC R&D Grabber", 10),
linkages = {("self", "inbox") : ("LINKRESOLVE", "inbox"),
("LINKRESOLVE", "outbox") : ("self", "outbox"),
("LINKRESOLVE", "urlrequests") : ("LINKREQUESTER", "inbox"),
("LINKREQUESTER", "outbox") : ("LINKRESOLVE", "responses")}).activate()
system = Graphline(CURRENTPROG = WhatsOn(proxy),
REQUESTER = Requester("all",dbuser,dbpass), # Can set this for specific channels to limit Twitter requests whilst doing dev
FIREHOSE = TwitterStream(username, password, proxy, True, 40), # Twitter API sends blank lines every 30 secs so timeout of 40 should be fine
SEARCH = PeopleSearch(consumerkeypair, keypair, proxy),
COLLECTOR = DataCollector(dbuser,dbpass),
RAWCOLLECTOR = RawDataCollector(dbuser,dbpass),
HTTPGETTER = HTTPGetter(proxy, "BBC R&D Grabber", 10),
HTTPGETTERRDF = HTTPGetter(proxy, "BBC R&D Grabber", 10),
TWOWAY = TwoWaySplitter(),
ANALYSIS = LiveAnalysis(dbuser,dbpass),
NLTKANALYSIS = LiveAnalysisNLTK(dbuser,dbpass),
TWEETCLEANER = Pipeline(LINKER,RetweetFixer(),RetweetCorrector(dbuser,dbpass),TweetCleaner(['user_mentions','urls','hashtags'])),
NLTKANALYSISFINAL = FinalAnalysisNLTK(dbuser,dbpass),
TWEETCLEANERFINAL = Pipeline(LINKERFINAL,RetweetFixer(),RetweetCorrector(dbuser,dbpass),TweetCleaner(['user_mentions','urls','hashtags'])),
linkages = {("REQUESTER", "whatson") : ("CURRENTPROG", "inbox"), # Request what's currently broadcasting
("CURRENTPROG", "outbox") : ("REQUESTER", "whatson"), # Pass back results of what's on
("REQUESTER", "outbox") : ("FIREHOSE", "inbox"), # Send generated keywords to Twitter streaming API
("FIREHOSE", "outbox") : ("TWOWAY" , "inbox"),
("TWOWAY", "outbox") : ("COLLECTOR" , "inbox"),
("TWOWAY", "outbox2") : ("RAWCOLLECTOR" , "inbox"),
("REQUESTER", "search") : ("SEARCH", "inbox"), # Perform Twitter people search based on keywords
("SEARCH", "outbox") : ("REQUESTER", "search"), # Return Twitter people search results
("REQUESTER", "dataout") : ("HTTPGETTERRDF", "inbox"),
("CURRENTPROG", "dataout") : ("HTTPGETTER", "inbox"),
("HTTPGETTER", "outbox") : ("CURRENTPROG", "datain"),
("HTTPGETTERRDF", "outbox") : ("REQUESTER", "datain"),
("ANALYSIS", "nltk") : ("NLTKANALYSIS", "inbox"),
("NLTKANALYSIS", "outbox") : ("ANALYSIS", "nltk"),
("NLTKANALYSIS", "tweetfixer") : ("TWEETCLEANER", "inbox"),
("TWEETCLEANER", "outbox") : ("NLTKANALYSIS", "tweetfixer"),
("ANALYSIS", "nltkfinal") : ("NLTKANALYSISFINAL", "inbox"),
("NLTKANALYSISFINAL", "outbox") : ("ANALYSIS", "nltkfinal"),
("NLTKANALYSISFINAL", "tweetfixer") : ("TWEETCLEANERFINAL", "inbox"),
("TWEETCLEANERFINAL", "outbox") : ("NLTKANALYSISFINAL", "tweetfixer"),
}
).run()
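    # The linkages dict above is the core Graphline idiom: each ("COMPONENT", "box")
    # key is wired to a ("COMPONENT", "box") value, and data flows along those links.
    # A hedged minimal sketch of the same pattern (component names are placeholders,
    # not components of this system):
    #   Graphline(SRC=SomeProducer(), DST=SomeConsumer(),
    #             linkages={("SRC", "outbox"): ("DST", "inbox")}).run()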
|
py | 1a52294f17c353b24c68e5aed92b8148cda0655b |
# books.py #
from dataclasses import dataclass
from sqlalchemy import Column, Integer, String, Date
from db.base import Base
from sqlalchemy.dialects.postgresql import MONEY
@dataclass
class Book(Base):
__tablename__ = 'books'
id = Column(Integer, primary_key=True)
title = Column(String)
author = Column(String)
pages = Column(Integer)
published = Column(Date)
price = Column(MONEY)
def __repr__(self):
return "<Book(title='{}', author='{}', pages={}, published={})>"\
.format(self.title, self.author, self.pages, self.published)
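# A hedged usage sketch (the engine URL and session setup are placeholders, not part
# of this module; Base.metadata is assumed to come from db.base as imported above):
#   from datetime import date
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#   engine = create_engine("postgresql://user:pass@localhost/library")  # placeholder URL
#   Base.metadata.create_all(engine)
#   session = sessionmaker(bind=engine)()
#   session.add(Book(title="Dune", author="Frank Herbert", pages=412,
#                    published=date(1965, 8, 1), price="$9.99"))
#   session.commit()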
|
py | 1a522a9ee4313efea15f5df0936fb5f894b6653b | import setuptools
#README as long_descriptions
with open("README.md", "r") as readme:
long_description = readme.read()
setuptools.setup(
name='automatedweb',
version='1.0.2',
url='https://github.com/renanleonellocastro/automatedweb.git',
license='MIT License',
author='Renan Leonello Castro',
author_email='[email protected]',
keywords='automatedweb web automated post get requests rest restfull',
    description='A tool to make it easy to communicate with web systems writing and reading data from them',
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
py_modules=["automatedweb"],
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
install_requires=[
"requests>=2.22.0",
"pyquery>=1.4.0",
"json5>=0.8.5",
"urllib3>=1.25.10",
],
)
|
py | 1a522aa0667d28677190ddeab1badf1be2c25181 | # Generated by Django 2.2.3 on 2019-07-12 14:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('TeamXapp', '0046_auto_20190712_1455'),
]
operations = [
migrations.AlterField(
model_name='allmembers',
name='scrum_team_name',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='TeamXapp.ScrumTeam', verbose_name='Scrum team: '),
),
migrations.AlterField(
model_name='allmembers',
name='scrum_team_roles',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='TeamXapp.ScrumTeamRole', verbose_name='Scrum Team Roles: '),
),
]
|
py | 1a522c206875af891eda90798a71d427744c65ba | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
idp_table = sql.Table(
'identity_provider',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('enabled', sql.Boolean, nullable=False),
sql.Column('description', sql.Text(), nullable=True))
idp_table.create(migrate_engine, checkfirst=True)
federation_protocol_table = sql.Table(
'federation_protocol',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('idp_id', sql.String(64),
sql.ForeignKey('identity_provider.id', ondelete='CASCADE'),
primary_key=True),
sql.Column('mapping_id', sql.String(64), nullable=True))
federation_protocol_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
tables = ['identity_provider', 'federation_protocol']
for table_name in tables:
table = sql.Table(table_name, meta, autoload=True)
table.drop()
|
py | 1a522c9d4f3a0804a42f992016337c304aa28d55 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import SlugField
from django.test import SimpleTestCase
class SlugFieldTest(SimpleTestCase):
def test_slugfield_normalization(self):
f = SlugField()
self.assertEqual(f.clean(' aa-bb-cc '), 'aa-bb-cc')
def test_slugfield_unicode_normalization(self):
f = SlugField(allow_unicode=True)
self.assertEqual(f.clean('a'), 'a')
self.assertEqual(f.clean('1'), '1')
self.assertEqual(f.clean('a1'), 'a1')
self.assertEqual(f.clean('你好'), '你好')
self.assertEqual(f.clean(' 你-好 '), '你-好')
self.assertEqual(f.clean('ıçğüş'), 'ıçğüş')
self.assertEqual(f.clean('foo-ıç-bar'), 'foo-ıç-bar')
|
py | 1a522e02cb6b60c7dadb4cc08b500040e91c0164 | import argparse
import collections
import json
import os
import re
import string
import sys
from copy import deepcopy
from bs4 import BeautifulSoup
class EvalOpts:
r"""
    The options which the metric evaluation process needs.
Arguments:
data_file (str): the SQuAD-style json file of the dataset in evaluation.
root_dir (str): the root directory of the raw WebSRC dataset, which contains the HTML files.
        pred_file (str): the prediction file which contains the best predicted answer text of each question from
        the model.
        tag_pred_file (str): the prediction file which contains the best predicted answer tag id of each question
        from the model.
        result_file (str): the file to write down the metric evaluation results of each question.
        out_file (str): the file to write down the final metric evaluation results of the whole dataset.
"""
def __init__(self, data_file, root_dir, pred_file, tag_pred_file, result_file='', out_file=""):
self.data_file = data_file
self.root_dir = root_dir
self.pred_file = pred_file
self.tag_pred_file = tag_pred_file
self.result_file = result_file
self.out_file = out_file
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
parser.add_argument('root_dir', metavar='./data', help='The root directory of the raw WebSRC dataset')
parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
parser.add_argument('tag_pred_file', metavar='tag_pred.json', help='Model predictions.')
parser.add_argument('--result-file', '-r', metavar='qas_eval.json')
parser.add_argument('--out-file', '-o', metavar='eval.json',
help='Write accuracy metrics to file (default is stdout).')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def make_pages_list(dataset):
r"""
    Record all the pages which appear in the dataset and return the list.
"""
pages_list = []
last_page = None
for domain in dataset:
for w in domain['websites']:
for qa in w['qas']:
if last_page != qa['id'][:4]:
last_page = qa['id'][:4]
pages_list.append(last_page)
return pages_list
def make_qid_to_has_ans(dataset):
r"""
    Map each question id in the dataset to whether that question has an answer, and return the mapping.
"""
qid_to_has_ans = {}
for domain in dataset:
for w in domain['websites']:
for qa in w['qas']:
qid_to_has_ans[qa['id']] = bool(qa['answers'])
return qid_to_has_ans
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
r"""
Get the word list in the input.
"""
if not s:
return []
return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
r"""
Calculate the exact match.
"""
if normalize_answer(a_gold) == normalize_answer(a_pred):
return 1
return 0
def compute_f1(a_gold, a_pred):
r"""
Calculate the f1 score.
"""
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
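# Worked example for compute_f1 (illustrative strings): with a_gold = "a cat sat" and
# a_pred = "the cat sat down", normalisation drops the articles, giving gold tokens
# ["cat", "sat"] and predicted tokens ["cat", "sat", "down"]; num_same = 2, so
# precision = 2/3, recall = 1, and f1 = 2 * (2/3) / (2/3 + 1) = 0.8.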
def compute_pos(f, t_gold, addition, t_pred):
r"""
Calculate the POS score.
Arguments:
f (str): the html file on which the question is based.
t_gold (int): the gold answer tag id provided by the dataset (the value correspond to the key element_id).
addition (int): the addition information used for yes/no question provided by the dataset (the value
corresponding to the key answer_start).
t_pred (list[int]): the tag ids of the tags corresponding the each word in the predicted answer.
Returns:
float: the POS score.
"""
h = BeautifulSoup(open(f), "lxml")
p_gold, e_gold = set(), h.find(tid=t_gold)
if e_gold is None:
if len(t_pred) != 1:
return 0
else:
t = t_pred[0]
e_pred, e_prev = h.find(tid=t), h.find(tid=t-1)
if (e_pred is not None) or (addition == 1 and e_prev is not None) or\
(addition == 0 and e_prev is None):
return 0
else:
return 1
else:
p_gold.add(e_gold['tid'])
for e in e_gold.parents:
if int(e['tid']) < 2:
break
p_gold.add(e['tid'])
p = None
for t in t_pred:
p_pred, e_pred = set(), h.find(tid=t)
if e_pred is not None:
p_pred.add(e_pred['tid'])
if e_pred.name != 'html':
for e in e_pred.parents:
if int(e['tid']) < 2:
break
p_pred.add(e['tid'])
else:
p_pred.add(str(t))
if p is None:
p = p_pred
else:
                p = p & p_pred # common ancestor-path set of the predicted tags, excluding html & body
return len(p_gold & p) / len(p_gold | p)
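# The returned value is the Jaccard similarity between the gold tag's ancestor-path
# set and the common ancestor-path set of the predicted tags. Illustrative example
# (hypothetical tag ids): p_gold = {"2", "3", "5"} and p = {"2", "3"} gives
# len(p_gold & p) / len(p_gold | p) = 2 / 3.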
def get_raw_scores(dataset, preds, tag_preds, root_dir):
r"""
    Calculate all three metrics (exact match, f1, POS) for each question.
Arguments:
dataset (dict): the dataset in use.
preds (dict): the answer text prediction for each question in the dataset.
tag_preds (dict): the answer tags prediction for each question in the dataset.
root_dir (str): the base directory for the html files.
Returns:
tuple(dict, dict, dict): exact match, f1, pos scores for each question.
"""
exact_scores = {}
f1_scores = {}
pos_scores = {}
for websites in dataset:
for w in websites['websites']:
f = os.path.join(root_dir, websites['domain'], w['page_id'][0:2], 'processed_data',
w['page_id'] + '.html')
for qa in w['qas']:
qid = qa['id']
gold_answers = [a['text'] for a in qa['answers']
if normalize_answer(a['text'])]
gold_tag_answers = [a['element_id'] for a in qa['answers']]
additional_tag_information = [a['answer_start'] for a in qa['answers']]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = ['']
if qid not in preds:
print('Missing prediction for %s' % qid)
continue
a_pred, t_pred = preds[qid], tag_preds[qid]
# Take max over all gold answers
exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
pos_scores[qid] = max(compute_pos(f, t, a, t_pred)
for t, a in zip(gold_tag_answers, additional_tag_information))
return exact_scores, f1_scores, pos_scores
def make_eval_dict(exact_scores, f1_scores, pos_scores, qid_list=None):
r"""
Make the dictionary to show the evaluation results.
"""
if qid_list is None:
total = len(exact_scores)
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores.values()) / total),
('f1', 100.0 * sum(f1_scores.values()) / total),
('pos', 100.0 * sum(pos_scores.values()) / total),
('total', total),
])
else:
total = len(qid_list)
if total == 0:
return collections.OrderedDict([
('exact', 0),
('f1', 0),
('pos', 0),
('total', 0),
])
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
('pos', 100.0 * sum(pos_scores[k] for k in qid_list) / total),
('total', total),
])
def merge_eval(main_eval, new_eval, prefix):
for k in new_eval:
main_eval['%s_%s' % (prefix, k)] = new_eval[k]
def main(opts):
with open(opts.data_file) as f:
dataset_json = json.load(f)
dataset = dataset_json['data']
if isinstance(opts.pred_file, str):
with open(opts.pred_file) as f:
preds = json.load(f)
else:
preds = opts.pred_file
if isinstance(opts.tag_pred_file, str):
with open(opts.tag_pred_file) as f:
tag_preds = json.load(f)
else:
tag_preds = opts.tag_pred_file
qid_to_has_ans = make_qid_to_has_ans(dataset)
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact, f1, pos = get_raw_scores(dataset, preds, tag_preds, opts.root_dir)
out_eval = make_eval_dict(exact, f1, pos)
if has_ans_qids:
has_ans_eval = make_eval_dict(exact, f1, pos, qid_list=has_ans_qids)
merge_eval(out_eval, has_ans_eval, 'HasAns')
if no_ans_qids:
no_ans_eval = make_eval_dict(exact, f1, pos, qid_list=no_ans_qids)
merge_eval(out_eval, no_ans_eval, 'NoAns')
print(json.dumps(out_eval, indent=2))
pages_list, write_eval = make_pages_list(dataset), deepcopy(out_eval)
for p in pages_list:
pages_ans_qids = [k for k, _ in qid_to_has_ans.items() if p in k]
page_eval = make_eval_dict(exact, f1, pos, qid_list=pages_ans_qids)
merge_eval(write_eval, page_eval, p)
if opts.result_file:
with open(opts.result_file, 'w') as f:
w = {}
for k, v in qid_to_has_ans.items():
w[k] = {'exact': exact[k], 'f1': f1[k], 'pos': pos[k]}
json.dump(w, f)
if opts.out_file:
with open(opts.out_file, 'w') as f:
json.dump(write_eval, f)
return out_eval
if __name__ == '__main__':
a="$4.99"
b="$4.99"
print(compute_exact(a,b))
|
py | 1a522f33284053cee6ba4c2b9db9bd2324832b74 | import Solution
|
py | 1a5232910e9439122a66de907a0ab2463ec4786d | import asyncio
from collections import defaultdict, deque
from collections.abc import Mapping, Set
from contextlib import suppress
from datetime import timedelta
from functools import partial
import inspect
import itertools
import json
import logging
import math
from numbers import Number
import operator
import os
import pickle
import random
import warnings
import weakref
import psutil
import sortedcontainers
from tlz import (
frequencies,
merge,
pluck,
merge_sorted,
first,
merge_with,
valmap,
second,
compose,
groupby,
concat,
)
from tornado.ioloop import IOLoop, PeriodicCallback
import dask
from . import profile
from .batched import BatchedSend
from .comm import (
normalize_address,
resolve_address,
get_address_host,
unparse_host_port,
)
from .comm.addressing import addresses_from_user_args
from .core import rpc, send_recv, clean_exception, CommClosedError, Status
from .diagnostics.plugin import SchedulerPlugin
from .http import get_handlers
from .metrics import time
from .node import ServerNode
from . import preloading
from .proctitle import setproctitle
from .security import Security
from .utils import (
All,
get_fileno_limit,
log_errors,
key_split,
validate_key,
no_default,
parse_timedelta,
parse_bytes,
shutting_down,
key_split_group,
empty_context,
tmpfile,
format_bytes,
format_time,
TimeoutError,
)
from .utils_comm import scatter_to_workers, gather_from_workers, retry_operation
from .utils_perf import enable_gc_diagnosis, disable_gc_diagnosis
from . import versions as version_module
from .publish import PublishExtension
from .queues import QueueExtension
from .semaphore import SemaphoreExtension
from .recreate_exceptions import ReplayExceptionScheduler
from .lock import LockExtension
from .event import EventExtension
from .pubsub import PubSubSchedulerExtension
from .stealing import WorkStealing
from .variable import VariableExtension
logger = logging.getLogger(__name__)
LOG_PDB = dask.config.get("distributed.admin.pdb-on-err")
DEFAULT_DATA_SIZE = parse_bytes(
dask.config.get("distributed.scheduler.default-data-size")
)
DEFAULT_EXTENSIONS = [
LockExtension,
PublishExtension,
ReplayExceptionScheduler,
QueueExtension,
VariableExtension,
PubSubSchedulerExtension,
SemaphoreExtension,
EventExtension,
]
ALL_TASK_STATES = {"released", "waiting", "no-worker", "processing", "erred", "memory"}
class ClientState:
"""
A simple object holding information about a client.
.. attribute:: client_key: str
A unique identifier for this client. This is generally an opaque
string generated by the client itself.
.. attribute:: wants_what: {TaskState}
A set of tasks this client wants kept in memory, so that it can
download its result when desired. This is the reverse mapping of
:class:`TaskState.who_wants`.
Tasks are typically removed from this set when the corresponding
object in the client's space (for example a ``Future`` or a Dask
collection) gets garbage-collected.
"""
__slots__ = ("client_key", "wants_what", "last_seen", "versions")
def __init__(self, client, versions=None):
self.client_key = client
self.wants_what = set()
self.last_seen = time()
self.versions = versions or {}
def __repr__(self):
return "<Client %r>" % (self.client_key,)
def __str__(self):
return self.client_key
class WorkerState:
"""
A simple object holding information about a worker.
.. attribute:: address
This worker's unique key. This can be its connected address
(such as ``'tcp://127.0.0.1:8891'``) or an alias (such as ``'alice'``).
.. attribute:: processing: {TaskState: cost}
A dictionary of tasks that have been submitted to this worker.
       Each task state is associated with the expected cost in seconds
of running that task, summing both the task's expected computation
time and the expected communication time of its result.
Multiple tasks may be submitted to a worker in advance and the worker
will run them eventually, depending on its execution resources
(but see :doc:`work-stealing`).
All the tasks here are in the "processing" state.
This attribute is kept in sync with :attr:`TaskState.processing_on`.
.. attribute:: has_what: {TaskState}
The set of tasks which currently reside on this worker.
All the tasks here are in the "memory" state.
This is the reverse mapping of :class:`TaskState.who_has`.
.. attribute:: nbytes: int
The total memory size, in bytes, used by the tasks this worker
holds in memory (i.e. the tasks in this worker's :attr:`has_what`).
.. attribute:: nthreads: int
The number of CPU threads made available on this worker.
.. attribute:: resources: {str: Number}
The available resources on this worker like ``{'gpu': 2}``.
These are abstract quantities that constrain certain tasks from
running at the same time on this worker.
.. attribute:: used_resources: {str: Number}
The sum of each resource used by all tasks allocated to this worker.
The numbers in this dictionary can only be less or equal than
those in this worker's :attr:`resources`.
.. attribute:: occupancy: Number
The total expected runtime, in seconds, of all tasks currently
processing on this worker. This is the sum of all the costs in
this worker's :attr:`processing` dictionary.
.. attribute:: status: str
The current status of the worker, either ``'running'`` or ``'closed'``
.. attribute:: nanny: str
Address of the associated Nanny, if present
.. attribute:: last_seen: Number
The last time we received a heartbeat from this worker, in local
scheduler time.
.. attribute:: actors: {TaskState}
A set of all TaskStates on this worker that are actors. This only
includes those actors whose state actually lives on this worker, not
actors to which this worker has a reference.
"""
# XXX need a state field to signal active/removed?
__slots__ = (
"actors",
"address",
"bandwidth",
"extra",
"has_what",
"last_seen",
"local_directory",
"memory_limit",
"metrics",
"name",
"nanny",
"nbytes",
"nthreads",
"occupancy",
"pid",
"processing",
"resources",
"services",
"_status",
"time_delay",
"used_resources",
"versions",
)
def __init__(
self,
address=None,
pid=0,
name=None,
nthreads=0,
memory_limit=0,
local_directory=None,
services=None,
versions=None,
nanny=None,
extra=None,
):
self.address = address
self.pid = pid
self.name = name
self.nthreads = nthreads
self.memory_limit = memory_limit
self.local_directory = local_directory
self.services = services or {}
self.versions = versions or {}
self.nanny = nanny
self._status = Status.running
self.nbytes = 0
self.occupancy = 0
self.metrics = {}
self.last_seen = 0
self.time_delay = 0
self.bandwidth = parse_bytes(dask.config.get("distributed.scheduler.bandwidth"))
self.actors = set()
self.has_what = set()
self.processing = {}
self.resources = {}
self.used_resources = {}
self.extra = extra or {}
def __hash__(self):
return hash(self.address)
def __eq__(self, other):
return type(self) == type(other) and self.address == other.address
@property
def status(self):
return self._status
@status.setter
def status(self, new_status):
if isinstance(new_status, Status):
self._status = new_status
elif isinstance(new_status, str) or new_status is None:
corresponding_enum_variants = [s for s in Status if s.value == new_status]
assert len(corresponding_enum_variants) == 1
self._status = corresponding_enum_variants[0]
@property
def host(self):
return get_address_host(self.address)
def clean(self):
""" Return a version of this object that is appropriate for serialization """
ws = WorkerState(
address=self.address,
pid=self.pid,
name=self.name,
nthreads=self.nthreads,
memory_limit=self.memory_limit,
local_directory=self.local_directory,
services=self.services,
nanny=self.nanny,
extra=self.extra,
)
ws.processing = {ts.key for ts in self.processing}
return ws
def __repr__(self):
return "<Worker %r, name: %s, memory: %d, processing: %d>" % (
self.address,
self.name,
len(self.has_what),
len(self.processing),
)
def identity(self):
return {
"type": "Worker",
"id": self.name,
"host": self.host,
"resources": self.resources,
"local_directory": self.local_directory,
"name": self.name,
"nthreads": self.nthreads,
"memory_limit": self.memory_limit,
"last_seen": self.last_seen,
"services": self.services,
"metrics": self.metrics,
"nanny": self.nanny,
**self.extra,
}
@property
def ncores(self):
warnings.warn("WorkerState.ncores has moved to WorkerState.nthreads")
return self.nthreads
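# Editor's illustrative sketch (not part of the scheduler): WorkerState identity is
# keyed on the address alone, as the __hash__/__eq__ pair above shows.  The addresses
# and thread counts below are hypothetical.
def _example_worker_state_identity():
    a = WorkerState(address="tcp://10.0.0.1:40000", nthreads=2)
    b = WorkerState(address="tcp://10.0.0.1:40000", nthreads=4)
    assert a == b
    assert len({a, b}) == 1  # duplicate records for one worker collapse in sets
    return {a, b}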
class TaskState:
"""
A simple object holding information about a task.
.. attribute:: key: str
The key is the unique identifier of a task, generally formed
from the name of the function, followed by a hash of the function
and arguments, like ``'inc-ab31c010444977004d656610d2d421ec'``.
.. attribute:: prefix: TaskPrefix
The broad class of tasks to which this task belongs like "inc" or
"read_csv"
.. attribute:: run_spec: object
A specification of how to run the task. The type and meaning of this
value is opaque to the scheduler, as it is only interpreted by the
worker to which the task is sent for executing.
As a special case, this attribute may also be ``None``, in which case
the task is "pure data" (such as, for example, a piece of data loaded
in the scheduler using :meth:`Client.scatter`). A "pure data" task
cannot be computed again if its value is lost.
.. attribute:: priority: tuple
The priority provides each task with a relative ranking which is used
to break ties when many tasks are being considered for execution.
       This ranking is generally a small tuple, compared lexicographically.
       Its leading items encode the client-assigned priority (negated, so
       that more important work sorts first) and the submission "generation",
       so that earlier submissions generally take precedence.  The final item
       is the within-graph ordering computed by ``dask.order``, which favors
       tasks that are on the critical path or that release many dependencies.
       This is explained further in
       :doc:`Scheduling Policy <scheduling-policies>`.
.. attribute:: state: str
This task's current state. Valid states include ``released``,
``waiting``, ``no-worker``, ``processing``, ``memory``, ``erred``
and ``forgotten``. If it is ``forgotten``, the task isn't stored
in the ``tasks`` dictionary anymore and will probably disappear
soon from memory.
.. attribute:: dependencies: {TaskState}
The set of tasks this task depends on for proper execution. Only
tasks still alive are listed in this set. If, for whatever reason,
this task also depends on a forgotten task, the
:attr:`has_lost_dependencies` flag is set.
A task can only be executed once all its dependencies have already
been successfully executed and have their result stored on at least
one worker. This is tracked by progressively draining the
:attr:`waiting_on` set.
.. attribute:: dependents: {TaskState}
The set of tasks which depend on this task. Only tasks still alive
are listed in this set.
This is the reverse mapping of :attr:`dependencies`.
.. attribute:: has_lost_dependencies: bool
Whether any of the dependencies of this task has been forgotten.
For memory consumption reasons, forgotten tasks are not kept in
memory even though they may have dependent tasks. When a task is
forgotten, therefore, each of its dependents has their
:attr:`has_lost_dependencies` attribute set to ``True``.
If :attr:`has_lost_dependencies` is true, this task cannot go
into the "processing" state anymore.
.. attribute:: waiting_on: {TaskState}
The set of tasks this task is waiting on *before* it can be executed.
This is always a subset of :attr:`dependencies`. Each time one of the
dependencies has finished processing, it is removed from the
:attr:`waiting_on` set.
Once :attr:`waiting_on` becomes empty, this task can move from the
"waiting" state to the "processing" state (unless one of the
dependencies errored out, in which case this task is instead
marked "erred").
.. attribute:: waiters: {TaskState}
The set of tasks which need this task to remain alive. This is always
a subset of :attr:`dependents`. Each time one of the dependents
has finished processing, it is removed from the :attr:`waiters`
set.
Once both :attr:`waiters` and :attr:`who_wants` become empty, this
task can be released (if it has a non-empty :attr:`run_spec`) or
forgotten (otherwise) by the scheduler, and by any workers
in :attr:`who_has`.
.. note:: Counter-intuitively, :attr:`waiting_on` and
:attr:`waiters` are not reverse mappings of each other.
.. attribute:: who_wants: {ClientState}
The set of clients who want this task's result to remain alive.
This is the reverse mapping of :attr:`ClientState.wants_what`.
When a client submits a graph to the scheduler it also specifies
which output tasks it desires, such that their results are not released
from memory.
Once a task has finished executing (i.e. moves into the "memory"
or "erred" state), the clients in :attr:`who_wants` are notified.
Once both :attr:`waiters` and :attr:`who_wants` become empty, this
task can be released (if it has a non-empty :attr:`run_spec`) or
forgotten (otherwise) by the scheduler, and by any workers
in :attr:`who_has`.
.. attribute:: who_has: {WorkerState}
The set of workers who have this task's result in memory.
It is non-empty iff the task is in the "memory" state. There can be
more than one worker in this set if, for example, :meth:`Client.scatter`
or :meth:`Client.replicate` was used.
This is the reverse mapping of :attr:`WorkerState.has_what`.
.. attribute:: processing_on: WorkerState (or None)
If this task is in the "processing" state, which worker is currently
processing it. Otherwise this is ``None``.
This attribute is kept in sync with :attr:`WorkerState.processing`.
.. attribute:: retries: int
The number of times this task can automatically be retried in case
of failure. If a task fails executing (the worker returns with
an error), its :attr:`retries` attribute is checked. If it is
equal to 0, the task is marked "erred". If it is greater than 0,
the :attr:`retries` attribute is decremented and execution is
attempted again.
.. attribute:: nbytes: int (or None)
The number of bytes, as determined by ``sizeof``, of the result
of a finished task. This number is used for diagnostics and to
help prioritize work.
.. attribute:: type: str
The type of the object as a string. Only present for tasks that have
been computed.
.. attribute:: exception: object
If this task failed executing, the exception object is stored here.
Otherwise this is ``None``.
.. attribute:: traceback: object
If this task failed executing, the traceback object is stored here.
Otherwise this is ``None``.
.. attribute:: exception_blame: TaskState (or None)
If this task or one of its dependencies failed executing, the
failed task is stored here (possibly itself). Otherwise this
is ``None``.
.. attribute:: suspicious: int
The number of times this task has been involved in a worker death.
Some tasks may cause workers to die (such as calling ``os._exit(0)``).
When a worker dies, all of the tasks on that worker are reassigned
to others. This combination of behaviors can cause a bad task to
catastrophically destroy all workers on the cluster, one after
another. Whenever a worker dies, we mark each task currently
processing on that worker (as recorded by
:attr:`WorkerState.processing`) as suspicious.
If a task is involved in three deaths (or some other fixed constant)
then we mark the task as ``erred``.
.. attribute:: host_restrictions: {hostnames}
A set of hostnames where this task can be run (or ``None`` if empty).
Usually this is empty unless the task has been specifically restricted
to only run on certain hosts. A hostname may correspond to one or
several connected workers.
.. attribute:: worker_restrictions: {worker addresses}
A set of complete worker addresses where this can be run (or ``None``
if empty). Usually this is empty unless the task has been specifically
restricted to only run on certain workers.
Note this is tracking worker addresses, not worker states, since
the specific workers may not be connected at this time.
.. attribute:: resource_restrictions: {resource: quantity}
Resources required by this task, such as ``{'gpu': 1}`` or
``{'memory': 1e9}`` (or ``None`` if empty). These are user-defined
names and are matched against the contents of each
:attr:`WorkerState.resources` dictionary.
.. attribute:: loose_restrictions: bool
If ``False``, each of :attr:`host_restrictions`,
:attr:`worker_restrictions` and :attr:`resource_restrictions` is
a hard constraint: if no worker is available satisfying those
restrictions, the task cannot go into the "processing" state and
will instead go into the "no-worker" state.
If ``True``, the above restrictions are mere preferences: if no worker
is available satisfying those restrictions, the task can still go
into the "processing" state and be sent for execution to another
connected worker.
    .. attribute:: actor: bool
       Whether or not this task is an Actor.
    .. attribute:: group: TaskGroup
       The group of tasks to which this one belongs.
"""
__slots__ = (
# === General description ===
"actor",
# Key name
"key",
# Key prefix (see key_split())
"prefix",
# How to run the task (None if pure data)
"run_spec",
# Alive dependents and dependencies
"dependencies",
"dependents",
# Compute priority
"priority",
# Restrictions
"host_restrictions",
"worker_restrictions", # not WorkerStates but addresses
"resource_restrictions",
"loose_restrictions",
# === Task state ===
"_state",
# Whether some dependencies were forgotten
"has_lost_dependencies",
# If in 'waiting' state, which tasks need to complete
# before we can run
"waiting_on",
        # If in 'waiting' or 'processing' state, which tasks need us
        # to complete before they can run
        "waiters",
        # If in 'processing' state, which worker we are processing on
        "processing_on",
        # If in 'memory' state, which workers have us
"who_has",
# Which clients want us
"who_wants",
"exception",
"traceback",
"exception_blame",
"suspicious",
"retries",
"nbytes",
"type",
"group_key",
"group",
)
def __init__(self, key, run_spec):
self.key = key
self.run_spec = run_spec
self._state = None
self.exception = self.traceback = self.exception_blame = None
self.suspicious = self.retries = 0
self.nbytes = None
self.priority = None
self.who_wants = set()
self.dependencies = set()
self.dependents = set()
self.waiting_on = set()
self.waiters = set()
self.who_has = set()
self.processing_on = None
self.has_lost_dependencies = False
self.host_restrictions = None
self.worker_restrictions = None
self.resource_restrictions = None
self.loose_restrictions = False
self.actor = None
self.type = None
self.group_key = key_split_group(key)
self.group = None
@property
def state(self) -> str:
return self._state
@property
def prefix_key(self):
return self.prefix.name
@state.setter
def state(self, value: str):
self.group.states[self._state] -= 1
self.group.states[value] += 1
self._state = value
def add_dependency(self, other: "TaskState"):
""" Add another task as a dependency of this task """
self.dependencies.add(other)
self.group.dependencies.add(other.group)
other.dependents.add(self)
def get_nbytes(self) -> int:
nbytes = self.nbytes
return nbytes if nbytes is not None else DEFAULT_DATA_SIZE
def set_nbytes(self, nbytes: int):
old_nbytes = self.nbytes
diff = nbytes - (old_nbytes or 0)
self.group.nbytes_total += diff
self.group.nbytes_in_memory += diff
for ws in self.who_has:
ws.nbytes += diff
self.nbytes = nbytes
def __repr__(self):
return "<Task %r %s>" % (self.key, self.state)
def validate(self):
try:
for cs in self.who_wants:
assert isinstance(cs, ClientState), (repr(cs), self.who_wants)
for ws in self.who_has:
assert isinstance(ws, WorkerState), (repr(ws), self.who_has)
for ts in self.dependencies:
assert isinstance(ts, TaskState), (repr(ts), self.dependencies)
for ts in self.dependents:
assert isinstance(ts, TaskState), (repr(ts), self.dependents)
validate_task_state(self)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
class TaskGroup:
""" Collection tracking all tasks within a group
Keys often have a structure like ``("x-123", 0)``
A group takes the first section, like ``"x-123"``
.. attribute:: name: str
The name of a group of tasks.
For a task like ``("x-123", 0)`` this is the text ``"x-123"``
.. attribute:: states: Dict[str, int]
The number of tasks in each state,
like ``{"memory": 10, "processing": 3, "released": 4, ...}``
.. attribute:: dependencies: Set[TaskGroup]
The other TaskGroups on which this one depends
.. attribute:: nbytes_total: int
The total number of bytes that this task group has produced
.. attribute:: nbytes_in_memory: int
The number of bytes currently stored by this TaskGroup
.. attribute:: duration: float
The total amount of time spent on all tasks in this TaskGroup
.. attribute:: types: Set[str]
The result types of this TaskGroup
See also
--------
TaskPrefix
"""
def __init__(self, name):
self.name = name
self.states = {state: 0 for state in ALL_TASK_STATES}
self.states["forgotten"] = 0
self.dependencies = set()
self.nbytes_total = 0
self.nbytes_in_memory = 0
self.duration = 0
self.types = set()
def add(self, ts):
self.states[ts.state] += 1
ts.group = self
def __repr__(self):
return (
"<"
+ (self.name or "no-group")
+ ": "
+ ", ".join(
"%s: %d" % (k, v) for (k, v) in sorted(self.states.items()) if v
)
+ ">"
)
def __len__(self):
return sum(self.states.values())
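# Editor's illustrative sketch (not part of the scheduler): how TaskState, TaskGroup and
# add_dependency/set_nbytes fit together.  Keys, states and byte counts are hypothetical;
# in the running scheduler Scheduler.new_task() performs this wiring.
def _example_task_state_wiring():
    group = TaskGroup("inc-123")
    a = TaskState("('inc-123', 0)", run_spec=None)
    b = TaskState("('inc-123', 1)", run_spec=None)
    a._state = b._state = "released"  # new_task() normally seeds the initial state
    group.add(a)
    group.add(b)
    b.add_dependency(a)  # a is now in b.dependencies and b in a.dependents
    b.set_nbytes(80)     # updates the task and its group's byte counters
    assert a in b.dependencies and b in a.dependents
    assert group.nbytes_total == 80
    return group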
class TaskPrefix:
""" Collection tracking all tasks within a group
Keys often have a structure like ``("x-123", 0)``
A group takes the first section, like ``"x"``
.. attribute:: name: str
The name of a group of tasks.
For a task like ``("x-123", 0)`` this is the text ``"x"``
.. attribute:: states: Dict[str, int]
The number of tasks in each state,
like ``{"memory": 10, "processing": 3, "released": 4, ...}``
.. attribute:: duration_average: float
An exponentially weighted moving average duration of all tasks with this prefix
.. attribute:: suspicious: int
Numbers of times a task was marked as suspicious with this prefix
See Also
--------
TaskGroup
"""
def __init__(self, name):
self.name = name
self.groups = []
# store timings for each prefix-action
self.all_durations = defaultdict(float)
        default_durations = dask.config.get(
            "distributed.scheduler.default-task-durations"
        )
        if self.name in default_durations:
            self.duration_average = parse_timedelta(default_durations[self.name])
        else:
            self.duration_average = None
self.suspicious = 0
@property
def states(self):
return merge_with(sum, [g.states for g in self.groups])
@property
def active(self):
return [
g
for g in self.groups
if any(v != 0 for k, v in g.states.items() if k != "forgotten")
]
@property
def active_states(self):
return merge_with(sum, [g.states for g in self.active])
def __repr__(self):
return (
"<"
+ self.name
+ ": "
+ ", ".join(
"%s: %d" % (k, v) for (k, v) in sorted(self.states.items()) if v
)
+ ">"
)
@property
def nbytes_in_memory(self):
return sum(tg.nbytes_in_memory for tg in self.groups)
@property
def nbytes_total(self):
return sum(tg.nbytes_total for tg in self.groups)
def __len__(self):
return sum(map(len, self.groups))
@property
def duration(self):
return sum(tg.duration for tg in self.groups)
@property
def types(self):
return set().union(*[tg.types for tg in self.groups])
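# Editor's illustrative sketch (not part of the scheduler): a TaskPrefix aggregates the
# per-state counters of its TaskGroups through merge_with(sum, ...).  Names and counts
# are hypothetical; the scheduler normally maintains prefix.groups in new_task().
def _example_task_prefix_aggregation():
    tp = TaskPrefix("inc")
    g1, g2 = TaskGroup("inc-123"), TaskGroup("inc-456")
    tp.groups.extend([g1, g2])
    g1.states["memory"] += 3
    g2.states["processing"] += 2
    merged = tp.states  # e.g. {"memory": 3, "processing": 2, ...other states: 0}
    assert merged["memory"] == 3 and merged["processing"] == 2
    return merged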
class _StateLegacyMapping(Mapping):
"""
A mapping interface mimicking the former Scheduler state dictionaries.
"""
def __init__(self, states, accessor):
self._states = states
self._accessor = accessor
def __iter__(self):
return iter(self._states)
def __len__(self):
return len(self._states)
def __getitem__(self, key):
return self._accessor(self._states[key])
def __repr__(self):
return "%s(%s)" % (self.__class__, dict(self))
class _OptionalStateLegacyMapping(_StateLegacyMapping):
"""
Similar to _StateLegacyMapping, but a false-y value is interpreted
as a missing key.
"""
# For tasks etc.
def __iter__(self):
accessor = self._accessor
for k, v in self._states.items():
if accessor(v):
yield k
def __len__(self):
accessor = self._accessor
return sum(bool(accessor(v)) for v in self._states.values())
def __getitem__(self, key):
v = self._accessor(self._states[key])
if v:
return v
else:
raise KeyError
class _StateLegacySet(Set):
"""
Similar to _StateLegacyMapping, but exposes a set containing
all values with a true value.
"""
# For loose_restrictions
def __init__(self, states, accessor):
self._states = states
self._accessor = accessor
def __iter__(self):
return (k for k, v in self._states.items() if self._accessor(v))
def __len__(self):
return sum(map(bool, map(self._accessor, self._states.values())))
def __contains__(self, k):
st = self._states.get(k)
return st is not None and bool(self._accessor(st))
def __repr__(self):
return "%s(%s)" % (self.__class__, set(self))
def _legacy_task_key_set(tasks):
"""
Transform a set of task states into a set of task keys.
"""
return {ts.key for ts in tasks}
def _legacy_client_key_set(clients):
"""
Transform a set of client states into a set of client keys.
"""
return {cs.client_key for cs in clients}
def _legacy_worker_key_set(workers):
"""
Transform a set of worker states into a set of worker keys.
"""
return {ws.address for ws in workers}
def _legacy_task_key_dict(task_dict):
"""
Transform a dict of {task state: value} into a dict of {task key: value}.
"""
return {ts.key: value for ts, value in task_dict.items()}
def _task_key_or_none(task):
return task.key if task is not None else None
class Scheduler(ServerNode):
""" Dynamic distributed task scheduler
The scheduler tracks the current state of workers, data, and computations.
The scheduler listens for events and responds by controlling workers
appropriately. It continuously tries to use the workers to execute an ever
growing dask graph.
All events are handled quickly, in linear time with respect to their input
(which is often of constant size) and generally within a millisecond. To
accomplish this the scheduler tracks a lot of state. Every operation
maintains the consistency of this state.
The scheduler communicates with the outside world through Comm objects.
It maintains a consistent and valid view of the world even when listening
to several clients at once.
A Scheduler is typically started either with the ``dask-scheduler``
executable::
$ dask-scheduler
Scheduler started at 127.0.0.1:8786
Or within a LocalCluster a Client starts up without connection
information::
>>> c = Client() # doctest: +SKIP
>>> c.cluster.scheduler # doctest: +SKIP
Scheduler(...)
Users typically do not interact with the scheduler directly but rather with
the client object ``Client``.
**State**
The scheduler contains the following state variables. Each variable is
listed along with what it stores and a brief description.
* **tasks:** ``{task key: TaskState}``
Tasks currently known to the scheduler
* **unrunnable:** ``{TaskState}``
Tasks in the "no-worker" state
* **workers:** ``{worker key: WorkerState}``
Workers currently connected to the scheduler
* **idle:** ``{WorkerState}``:
Set of workers that are not fully utilized
* **saturated:** ``{WorkerState}``:
        Set of workers that are over-utilized (they have more work than they
        can promptly process)
* **host_info:** ``{hostname: dict}``:
Information about each worker host
* **clients:** ``{client key: ClientState}``
Clients currently connected to the scheduler
* **services:** ``{str: port}``:
Other services running on this scheduler, like Bokeh
* **loop:** ``IOLoop``:
The running Tornado IOLoop
* **client_comms:** ``{client key: Comm}``
For each client, a Comm object used to receive task requests and
report task status updates.
* **stream_comms:** ``{worker key: Comm}``
For each worker, a Comm object from which we both accept stimuli and
report results
* **task_duration:** ``{key-prefix: time}``
Time we expect certain functions to take, e.g. ``{'sum': 0.25}``
"""
default_port = 8786
_instances = weakref.WeakSet()
def __init__(
self,
loop=None,
delete_interval="500ms",
synchronize_worker_interval="60s",
services=None,
service_kwargs=None,
allowed_failures=None,
extensions=None,
validate=None,
scheduler_file=None,
security=None,
worker_ttl=None,
idle_timeout=None,
interface=None,
host=None,
port=0,
protocol=None,
dashboard_address=None,
dashboard=None,
http_prefix="/",
preload=None,
preload_argv=(),
plugins=(),
**kwargs,
):
self._setup_logging(logger)
# Attributes
if allowed_failures is None:
allowed_failures = dask.config.get("distributed.scheduler.allowed-failures")
self.allowed_failures = allowed_failures
if validate is None:
validate = dask.config.get("distributed.scheduler.validate")
self.validate = validate
self.proc = psutil.Process()
self.delete_interval = parse_timedelta(delete_interval, default="ms")
self.synchronize_worker_interval = parse_timedelta(
synchronize_worker_interval, default="ms"
)
self.digests = None
self.service_specs = services or {}
self.service_kwargs = service_kwargs or {}
self.services = {}
self.scheduler_file = scheduler_file
worker_ttl = worker_ttl or dask.config.get("distributed.scheduler.worker-ttl")
self.worker_ttl = parse_timedelta(worker_ttl) if worker_ttl else None
idle_timeout = idle_timeout or dask.config.get(
"distributed.scheduler.idle-timeout"
)
if idle_timeout:
self.idle_timeout = parse_timedelta(idle_timeout)
else:
self.idle_timeout = None
self.idle_since = time()
self._lock = asyncio.Lock()
self.bandwidth = parse_bytes(dask.config.get("distributed.scheduler.bandwidth"))
self.bandwidth_workers = defaultdict(float)
self.bandwidth_types = defaultdict(float)
if not preload:
preload = dask.config.get("distributed.scheduler.preload")
if not preload_argv:
preload_argv = dask.config.get("distributed.scheduler.preload-argv")
self.preloads = preloading.process_preloads(self, preload, preload_argv)
if isinstance(security, dict):
security = Security(**security)
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args("scheduler")
self._start_address = addresses_from_user_args(
host=host,
port=port,
interface=interface,
protocol=protocol,
security=security,
default_port=self.default_port,
)
routes = get_handlers(
server=self,
modules=dask.config.get("distributed.scheduler.http.routes"),
prefix=http_prefix,
)
self.start_http_server(routes, dashboard_address, default_port=8787)
if dashboard or (dashboard is None and dashboard_address):
try:
import distributed.dashboard.scheduler
except ImportError:
logger.debug("To start diagnostics web server please install Bokeh")
else:
distributed.dashboard.scheduler.connect(
self.http_application, self.http_server, self, prefix=http_prefix
)
# Communication state
self.loop = loop or IOLoop.current()
self.client_comms = dict()
self.stream_comms = dict()
self._worker_coroutines = []
self._ipython_kernel = None
# Task state
self.tasks = dict()
self.task_groups = dict()
self.task_prefixes = dict()
for old_attr, new_attr, wrap in [
("priority", "priority", None),
("dependencies", "dependencies", _legacy_task_key_set),
("dependents", "dependents", _legacy_task_key_set),
("retries", "retries", None),
]:
func = operator.attrgetter(new_attr)
if wrap is not None:
func = compose(wrap, func)
setattr(self, old_attr, _StateLegacyMapping(self.tasks, func))
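        # Illustration: after the loop above, ``self.dependencies[key]`` returns the key
        # set of ``self.tasks[key].dependencies``, preserving the pre-TaskState dict API.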
for old_attr, new_attr, wrap in [
("nbytes", "nbytes", None),
("who_wants", "who_wants", _legacy_client_key_set),
("who_has", "who_has", _legacy_worker_key_set),
("waiting", "waiting_on", _legacy_task_key_set),
("waiting_data", "waiters", _legacy_task_key_set),
("rprocessing", "processing_on", None),
("host_restrictions", "host_restrictions", None),
("worker_restrictions", "worker_restrictions", None),
("resource_restrictions", "resource_restrictions", None),
("suspicious_tasks", "suspicious", None),
("exceptions", "exception", None),
("tracebacks", "traceback", None),
("exceptions_blame", "exception_blame", _task_key_or_none),
]:
func = operator.attrgetter(new_attr)
if wrap is not None:
func = compose(wrap, func)
setattr(self, old_attr, _OptionalStateLegacyMapping(self.tasks, func))
for old_attr, new_attr, wrap in [
("loose_restrictions", "loose_restrictions", None)
]:
func = operator.attrgetter(new_attr)
if wrap is not None:
func = compose(wrap, func)
setattr(self, old_attr, _StateLegacySet(self.tasks, func))
self.generation = 0
self._last_client = None
self._last_time = 0
self.unrunnable = set()
self.n_tasks = 0
self.task_metadata = dict()
self.datasets = dict()
# Prefix-keyed containers
self.unknown_durations = defaultdict(set)
# Client state
self.clients = dict()
for old_attr, new_attr, wrap in [
("wants_what", "wants_what", _legacy_task_key_set)
]:
func = operator.attrgetter(new_attr)
if wrap is not None:
func = compose(wrap, func)
setattr(self, old_attr, _StateLegacyMapping(self.clients, func))
self.clients["fire-and-forget"] = ClientState("fire-and-forget")
# Worker state
self.workers = sortedcontainers.SortedDict()
for old_attr, new_attr, wrap in [
("nthreads", "nthreads", None),
("worker_bytes", "nbytes", None),
("worker_resources", "resources", None),
("used_resources", "used_resources", None),
("occupancy", "occupancy", None),
("worker_info", "metrics", None),
("processing", "processing", _legacy_task_key_dict),
("has_what", "has_what", _legacy_task_key_set),
]:
func = operator.attrgetter(new_attr)
if wrap is not None:
func = compose(wrap, func)
setattr(self, old_attr, _StateLegacyMapping(self.workers, func))
self.idle = sortedcontainers.SortedSet(key=operator.attrgetter("address"))
self.saturated = set()
self.total_nthreads = 0
self.total_occupancy = 0
self.host_info = defaultdict(dict)
self.resources = defaultdict(dict)
self.aliases = dict()
self._task_state_collections = [self.unrunnable]
self._worker_collections = [
self.workers,
self.host_info,
self.resources,
self.aliases,
]
self.extensions = {}
self.plugins = list(plugins)
self.transition_log = deque(
maxlen=dask.config.get("distributed.scheduler.transition-log-length")
)
self.log = deque(
maxlen=dask.config.get("distributed.scheduler.transition-log-length")
)
self.worker_plugins = []
worker_handlers = {
"task-finished": self.handle_task_finished,
"task-erred": self.handle_task_erred,
"release": self.handle_release_data,
"release-worker-data": self.release_worker_data,
"add-keys": self.add_keys,
"missing-data": self.handle_missing_data,
"long-running": self.handle_long_running,
"reschedule": self.reschedule,
"keep-alive": lambda *args, **kwargs: None,
}
client_handlers = {
"update-graph": self.update_graph,
"client-desires-keys": self.client_desires_keys,
"update-data": self.update_data,
"report-key": self.report_on_key,
"client-releases-keys": self.client_releases_keys,
"heartbeat-client": self.client_heartbeat,
"close-client": self.remove_client,
"restart": self.restart,
}
self.handlers = {
"register-client": self.add_client,
"scatter": self.scatter,
"register-worker": self.add_worker,
"unregister": self.remove_worker,
"gather": self.gather,
"cancel": self.stimulus_cancel,
"retry": self.stimulus_retry,
"feed": self.feed,
"terminate": self.close,
"broadcast": self.broadcast,
"proxy": self.proxy,
"ncores": self.get_ncores,
"has_what": self.get_has_what,
"who_has": self.get_who_has,
"processing": self.get_processing,
"call_stack": self.get_call_stack,
"profile": self.get_profile,
"performance_report": self.performance_report,
"get_logs": self.get_logs,
"logs": self.get_logs,
"worker_logs": self.get_worker_logs,
"nbytes": self.get_nbytes,
"versions": self.versions,
"add_keys": self.add_keys,
"rebalance": self.rebalance,
"replicate": self.replicate,
"start_ipython": self.start_ipython,
"run_function": self.run_function,
"update_data": self.update_data,
"set_resources": self.add_resources,
"retire_workers": self.retire_workers,
"get_metadata": self.get_metadata,
"set_metadata": self.set_metadata,
"heartbeat_worker": self.heartbeat_worker,
"get_task_status": self.get_task_status,
"get_task_stream": self.get_task_stream,
"register_worker_plugin": self.register_worker_plugin,
"adaptive_target": self.adaptive_target,
"workers_to_close": self.workers_to_close,
"subscribe_worker_status": self.subscribe_worker_status,
}
self._transitions = {
("released", "waiting"): self.transition_released_waiting,
("waiting", "released"): self.transition_waiting_released,
("waiting", "processing"): self.transition_waiting_processing,
("waiting", "memory"): self.transition_waiting_memory,
("processing", "released"): self.transition_processing_released,
("processing", "memory"): self.transition_processing_memory,
("processing", "erred"): self.transition_processing_erred,
("no-worker", "released"): self.transition_no_worker_released,
("no-worker", "waiting"): self.transition_no_worker_waiting,
("released", "forgotten"): self.transition_released_forgotten,
("memory", "forgotten"): self.transition_memory_forgotten,
("erred", "forgotten"): self.transition_released_forgotten,
("erred", "released"): self.transition_erred_released,
("memory", "released"): self.transition_memory_released,
("released", "erred"): self.transition_released_erred,
}
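        # Illustration: a successful task typically walks this table as
        # released -> waiting -> processing -> memory, and later
        # memory -> released -> forgotten once no client or dependent needs it.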
connection_limit = get_fileno_limit() / 2
super(Scheduler, self).__init__(
handlers=self.handlers,
stream_handlers=merge(worker_handlers, client_handlers),
io_loop=self.loop,
connection_limit=connection_limit,
deserialize=False,
connection_args=self.connection_args,
**kwargs,
)
if self.worker_ttl:
pc = PeriodicCallback(self.check_worker_ttl, self.worker_ttl)
self.periodic_callbacks["worker-ttl"] = pc
if self.idle_timeout:
pc = PeriodicCallback(self.check_idle, self.idle_timeout / 4)
self.periodic_callbacks["idle-timeout"] = pc
if extensions is None:
extensions = list(DEFAULT_EXTENSIONS)
if dask.config.get("distributed.scheduler.work-stealing"):
extensions.append(WorkStealing)
for ext in extensions:
ext(self)
setproctitle("dask-scheduler [not started]")
Scheduler._instances.add(self)
self.rpc.allow_offload = False
self.status = Status.undefined
@property
def status(self):
return self._status
@status.setter
def status(self, new_status):
if isinstance(new_status, Status):
self._status = new_status
elif isinstance(new_status, str) or new_status is None:
corresponding_enum_variants = [s for s in Status if s.value == new_status]
assert len(corresponding_enum_variants) == 1
self._status = corresponding_enum_variants[0]
##################
# Administration #
##################
def __repr__(self):
return '<Scheduler: "%s" processes: %d cores: %d>' % (
self.address,
len(self.workers),
self.total_nthreads,
)
def identity(self, comm=None):
""" Basic information about ourselves and our cluster """
d = {
"type": type(self).__name__,
"id": str(self.id),
"address": self.address,
"services": {key: v.port for (key, v) in self.services.items()},
"workers": {
worker.address: worker.identity() for worker in self.workers.values()
},
}
return d
def get_worker_service_addr(self, worker, service_name, protocol=False):
"""
Get the (host, port) address of the named service on the *worker*.
Returns None if the service doesn't exist.
Parameters
----------
worker : address
service_name : str
Common services include 'bokeh' and 'nanny'
protocol : boolean
Whether or not to include a full address with protocol (True)
or just a (host, port) pair
"""
ws = self.workers[worker]
port = ws.services.get(service_name)
if port is None:
return None
elif protocol:
return "%(protocol)s://%(host)s:%(port)d" % {
"protocol": ws.address.split("://")[0],
"host": ws.host,
"port": port,
}
else:
return ws.host, port
async def start(self):
""" Clear out old state and restart all running coroutines """
await super().start()
assert self.status != Status.running
enable_gc_diagnosis()
self.clear_task_state()
with suppress(AttributeError):
for c in self._worker_coroutines:
c.cancel()
for addr in self._start_address:
await self.listen(
addr, allow_offload=False, **self.security.get_listen_args("scheduler")
)
self.ip = get_address_host(self.listen_address)
listen_ip = self.ip
if listen_ip == "0.0.0.0":
listen_ip = ""
if self.address.startswith("inproc://"):
listen_ip = "localhost"
# Services listen on all addresses
self.start_services(listen_ip)
for listener in self.listeners:
logger.info(" Scheduler at: %25s", listener.contact_address)
for k, v in self.services.items():
logger.info("%11s at: %25s", k, "%s:%d" % (listen_ip, v.port))
self.loop.add_callback(self.reevaluate_occupancy)
if self.scheduler_file:
with open(self.scheduler_file, "w") as f:
json.dump(self.identity(), f, indent=2)
fn = self.scheduler_file # remove file when we close the process
def del_scheduler_file():
if os.path.exists(fn):
os.remove(fn)
weakref.finalize(self, del_scheduler_file)
for preload in self.preloads:
await preload.start()
await asyncio.gather(*[plugin.start(self) for plugin in self.plugins])
self.start_periodic_callbacks()
setproctitle("dask-scheduler [%s]" % (self.address,))
return self
async def close(self, comm=None, fast=False, close_workers=False):
""" Send cleanup signal to all coroutines then wait until finished
See Also
--------
Scheduler.cleanup
"""
if self.status in (Status.closing, Status.closed, Status.closing_gracefully):
await self.finished()
return
self.status = Status.closing
logger.info("Scheduler closing...")
setproctitle("dask-scheduler [closing]")
for preload in self.preloads:
await preload.teardown()
if close_workers:
await self.broadcast(msg={"op": "close_gracefully"}, nanny=True)
for worker in self.workers:
self.worker_send(worker, {"op": "close"})
for i in range(20): # wait a second for send signals to clear
if self.workers:
await asyncio.sleep(0.05)
else:
break
await asyncio.gather(*[plugin.close() for plugin in self.plugins])
for pc in self.periodic_callbacks.values():
pc.stop()
self.periodic_callbacks.clear()
self.stop_services()
for ext in self.extensions.values():
with suppress(AttributeError):
ext.teardown()
logger.info("Scheduler closing all comms")
futures = []
for w, comm in list(self.stream_comms.items()):
if not comm.closed():
comm.send({"op": "close", "report": False})
comm.send({"op": "close-stream"})
with suppress(AttributeError):
futures.append(comm.close())
for future in futures: # TODO: do all at once
await future
for comm in self.client_comms.values():
comm.abort()
await self.rpc.close()
self.status = Status.closed
self.stop()
await super(Scheduler, self).close()
setproctitle("dask-scheduler [closed]")
disable_gc_diagnosis()
async def close_worker(self, comm=None, worker=None, safe=None):
""" Remove a worker from the cluster
This both removes the worker from our local state and also sends a
signal to the worker to shut down. This works regardless of whether or
not the worker has a nanny process restarting it
"""
logger.info("Closing worker %s", worker)
with log_errors():
self.log_event(worker, {"action": "close-worker"})
nanny_addr = self.workers[worker].nanny
address = nanny_addr or worker
self.worker_send(worker, {"op": "close", "report": False})
await self.remove_worker(address=worker, safe=safe)
###########
# Stimuli #
###########
def heartbeat_worker(
self,
comm=None,
address=None,
resolve_address=True,
now=None,
resources=None,
host_info=None,
metrics=None,
):
address = self.coerce_address(address, resolve_address)
address = normalize_address(address)
if address not in self.workers:
return {"status": "missing"}
host = get_address_host(address)
local_now = time()
now = now or time()
assert metrics
host_info = host_info or {}
self.host_info[host]["last-seen"] = local_now
frac = 1 / len(self.workers)
self.bandwidth = (
self.bandwidth * (1 - frac) + metrics["bandwidth"]["total"] * frac
)
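        # Worked illustration (hypothetical numbers): with 4 workers frac == 0.25, so a
        # running estimate of 100 MB/s and a reported total of 140 MB/s update to
        # 100 * 0.75 + 140 * 0.25 == 110 MB/s (an exponentially weighted moving average).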
for other, (bw, count) in metrics["bandwidth"]["workers"].items():
if (address, other) not in self.bandwidth_workers:
self.bandwidth_workers[address, other] = bw / count
else:
alpha = (1 - frac) ** count
self.bandwidth_workers[address, other] = self.bandwidth_workers[
address, other
] * alpha + bw * (1 - alpha)
for typ, (bw, count) in metrics["bandwidth"]["types"].items():
if typ not in self.bandwidth_types:
self.bandwidth_types[typ] = bw / count
else:
alpha = (1 - frac) ** count
self.bandwidth_types[typ] = self.bandwidth_types[typ] * alpha + bw * (
1 - alpha
)
ws = self.workers[address]
ws.last_seen = time()
if metrics:
ws.metrics = metrics
if host_info:
self.host_info[host].update(host_info)
delay = time() - now
ws.time_delay = delay
if resources:
self.add_resources(worker=address, resources=resources)
self.log_event(address, merge({"action": "heartbeat"}, metrics))
return {
"status": "OK",
"time": time(),
"heartbeat-interval": heartbeat_interval(len(self.workers)),
}
async def add_worker(
self,
comm=None,
address=None,
keys=(),
nthreads=None,
name=None,
resolve_address=True,
nbytes=None,
types=None,
now=None,
resources=None,
host_info=None,
memory_limit=None,
metrics=None,
pid=0,
services=None,
local_directory=None,
versions=None,
nanny=None,
extra=None,
):
""" Add a new worker to the cluster """
with log_errors():
address = self.coerce_address(address, resolve_address)
address = normalize_address(address)
host = get_address_host(address)
ws = self.workers.get(address)
if ws is not None:
raise ValueError("Worker already exists %s" % ws)
if name in self.aliases:
msg = {
"status": "error",
"message": "name taken, %s" % name,
"time": time(),
}
if comm:
await comm.write(msg)
return
self.workers[address] = ws = WorkerState(
address=address,
pid=pid,
nthreads=nthreads,
memory_limit=memory_limit,
name=name,
local_directory=local_directory,
services=services,
versions=versions,
nanny=nanny,
extra=extra,
)
if "addresses" not in self.host_info[host]:
self.host_info[host].update({"addresses": set(), "nthreads": 0})
self.host_info[host]["addresses"].add(address)
self.host_info[host]["nthreads"] += nthreads
self.total_nthreads += nthreads
self.aliases[name] = address
response = self.heartbeat_worker(
address=address,
resolve_address=resolve_address,
now=now,
resources=resources,
host_info=host_info,
metrics=metrics,
)
            # No need to adjust self.total_occupancy: a newly added worker cannot have
            # any occupancy yet.
self.check_idle_saturated(ws)
# for key in keys: # TODO
# self.mark_key_in_memory(key, [address])
self.stream_comms[address] = BatchedSend(interval="5ms", loop=self.loop)
if ws.nthreads > len(ws.processing):
self.idle.add(ws)
for plugin in self.plugins[:]:
try:
result = plugin.add_worker(scheduler=self, worker=address)
if inspect.isawaitable(result):
await result
except Exception as e:
logger.exception(e)
if nbytes:
for key in nbytes:
ts = self.tasks.get(key)
if ts is not None and ts.state in ("processing", "waiting"):
recommendations = self.transition(
key,
"memory",
worker=address,
nbytes=nbytes[key],
typename=types[key],
)
self.transitions(recommendations)
recommendations = {}
for ts in list(self.unrunnable):
valid = self.valid_workers(ts)
if valid is True or ws in valid:
recommendations[ts.key] = "waiting"
if recommendations:
self.transitions(recommendations)
self.log_event(address, {"action": "add-worker"})
self.log_event("all", {"action": "add-worker", "worker": address})
logger.info("Register worker %s", ws)
msg = {
"status": "OK",
"time": time(),
"heartbeat-interval": heartbeat_interval(len(self.workers)),
"worker-plugins": self.worker_plugins,
}
version_warning = version_module.error_message(
version_module.get_versions(),
merge(
{w: ws.versions for w, ws in self.workers.items()},
{c: cs.versions for c, cs in self.clients.items() if cs.versions},
),
versions,
client_name="This Worker",
)
if version_warning:
msg["warning"] = version_warning
if comm:
await comm.write(msg)
await self.handle_worker(comm=comm, worker=address)
def update_graph(
self,
client=None,
tasks=None,
keys=None,
dependencies=None,
restrictions=None,
priority=None,
loose_restrictions=None,
resources=None,
submitting_task=None,
retries=None,
user_priority=0,
actors=None,
fifo_timeout=0,
):
"""
Add new computations to the internal dask graph
This happens whenever the Client calls submit, map, get, or compute.
"""
start = time()
fifo_timeout = parse_timedelta(fifo_timeout)
keys = set(keys)
if len(tasks) > 1:
self.log_event(
["all", client], {"action": "update_graph", "count": len(tasks)}
)
# Remove aliases
for k in list(tasks):
if tasks[k] is k:
del tasks[k]
dependencies = dependencies or {}
n = 0
while len(tasks) != n: # walk through new tasks, cancel any bad deps
n = len(tasks)
for k, deps in list(dependencies.items()):
if any(
dep not in self.tasks and dep not in tasks for dep in deps
): # bad key
logger.info("User asked for computation on lost data, %s", k)
del tasks[k]
del dependencies[k]
if k in keys:
keys.remove(k)
self.report({"op": "cancelled-key", "key": k}, client=client)
self.client_releases_keys(keys=[k], client=client)
# Remove any self-dependencies (happens on test_publish_bag() and others)
for k, v in dependencies.items():
deps = set(v)
if k in deps:
deps.remove(k)
dependencies[k] = deps
# Avoid computation that is already finished
already_in_memory = set() # tasks that are already done
for k, v in dependencies.items():
if v and k in self.tasks and self.tasks[k].state in ("memory", "erred"):
already_in_memory.add(k)
if already_in_memory:
dependents = dask.core.reverse_dict(dependencies)
stack = list(already_in_memory)
done = set(already_in_memory)
while stack: # remove unnecessary dependencies
key = stack.pop()
ts = self.tasks[key]
try:
deps = dependencies[key]
except KeyError:
deps = self.dependencies[key]
for dep in deps:
if dep in dependents:
child_deps = dependents[dep]
else:
child_deps = self.dependencies[dep]
if all(d in done for d in child_deps):
if dep in self.tasks and dep not in done:
done.add(dep)
stack.append(dep)
for d in done:
tasks.pop(d, None)
dependencies.pop(d, None)
# Get or create task states
stack = list(keys)
touched_keys = set()
touched_tasks = []
while stack:
k = stack.pop()
if k in touched_keys:
continue
# XXX Have a method get_task_state(self, k) ?
ts = self.tasks.get(k)
if ts is None:
ts = self.new_task(k, tasks.get(k), "released")
elif not ts.run_spec:
ts.run_spec = tasks.get(k)
touched_keys.add(k)
touched_tasks.append(ts)
stack.extend(dependencies.get(k, ()))
self.client_desires_keys(keys=keys, client=client)
# Add dependencies
for key, deps in dependencies.items():
ts = self.tasks.get(key)
if ts is None or ts.dependencies:
continue
for dep in deps:
dts = self.tasks[dep]
ts.add_dependency(dts)
# Compute priorities
if isinstance(user_priority, Number):
user_priority = {k: user_priority for k in tasks}
# Add actors
if actors is True:
actors = list(keys)
for actor in actors or []:
self.tasks[actor].actor = True
priority = priority or dask.order.order(
tasks
) # TODO: define order wrt old graph
if submitting_task: # sub-tasks get better priority than parent tasks
ts = self.tasks.get(submitting_task)
if ts is not None:
generation = ts.priority[0] - 0.01
else: # super-task already cleaned up
generation = self.generation
elif self._last_time + fifo_timeout < start:
self.generation += 1 # older graph generations take precedence
generation = self.generation
self._last_time = start
else:
generation = self.generation
for key in set(priority) & touched_keys:
ts = self.tasks[key]
if ts.priority is None:
ts.priority = (-(user_priority.get(key, 0)), generation, priority[key])
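                # Worked illustration (hypothetical values): user_priority=10, generation=3
                # and a dask.order rank of 7 give priority == (-10, 3, 7); lower tuples run
                # first, so a larger user priority (more negative first element) wins.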
# Ensure all runnables have a priority
runnables = [ts for ts in touched_tasks if ts.run_spec]
for ts in runnables:
if ts.priority is None and ts.run_spec:
ts.priority = (self.generation, 0)
if restrictions:
# *restrictions* is a dict keying task ids to lists of
# restriction specifications (either worker names or addresses)
for k, v in restrictions.items():
if v is None:
continue
ts = self.tasks.get(k)
if ts is None:
continue
ts.host_restrictions = set()
ts.worker_restrictions = set()
for w in v:
try:
w = self.coerce_address(w)
except ValueError:
# Not a valid address, but perhaps it's a hostname
ts.host_restrictions.add(w)
else:
ts.worker_restrictions.add(w)
if loose_restrictions:
for k in loose_restrictions:
ts = self.tasks[k]
ts.loose_restrictions = True
if resources:
for k, v in resources.items():
if v is None:
continue
assert isinstance(v, dict)
ts = self.tasks.get(k)
if ts is None:
continue
ts.resource_restrictions = v
if retries:
for k, v in retries.items():
assert isinstance(v, int)
ts = self.tasks.get(k)
if ts is None:
continue
ts.retries = v
# Compute recommendations
recommendations = {}
for ts in sorted(runnables, key=operator.attrgetter("priority"), reverse=True):
if ts.state == "released" and ts.run_spec:
recommendations[ts.key] = "waiting"
for ts in touched_tasks:
for dts in ts.dependencies:
if dts.exception_blame:
ts.exception_blame = dts.exception_blame
recommendations[ts.key] = "erred"
break
for plugin in self.plugins[:]:
try:
plugin.update_graph(
self,
client=client,
tasks=tasks,
keys=keys,
restrictions=restrictions or {},
dependencies=dependencies,
priority=priority,
loose_restrictions=loose_restrictions,
resources=resources,
)
except Exception as e:
logger.exception(e)
self.transitions(recommendations)
for ts in touched_tasks:
if ts.state in ("memory", "erred"):
self.report_on_key(ts.key, client=client)
end = time()
if self.digests is not None:
self.digests["update-graph-duration"].add(end - start)
# TODO: balance workers
def new_task(self, key, spec, state):
""" Create a new task, and associated states """
ts = TaskState(key, spec)
ts._state = state
prefix_key = key_split(key)
try:
tp = self.task_prefixes[prefix_key]
except KeyError:
tp = self.task_prefixes[prefix_key] = TaskPrefix(prefix_key)
ts.prefix = tp
group_key = ts.group_key
try:
tg = self.task_groups[group_key]
except KeyError:
tg = self.task_groups[group_key] = TaskGroup(group_key)
tg.prefix = tp
tp.groups.append(tg)
tg.add(ts)
self.tasks[key] = ts
return ts
def stimulus_task_finished(self, key=None, worker=None, **kwargs):
""" Mark that a task has finished execution on a particular worker """
logger.debug("Stimulus task finished %s, %s", key, worker)
ts = self.tasks.get(key)
if ts is None:
return {}
ws = self.workers[worker]
if ts.state == "processing":
recommendations = self.transition(key, "memory", worker=worker, **kwargs)
if ts.state == "memory":
assert ws in ts.who_has
else:
logger.debug(
"Received already computed task, worker: %s, state: %s"
", key: %s, who_has: %s",
worker,
ts.state,
key,
ts.who_has,
)
if ws not in ts.who_has:
self.worker_send(worker, {"op": "release-task", "key": key})
recommendations = {}
return recommendations
def stimulus_task_erred(
self, key=None, worker=None, exception=None, traceback=None, **kwargs
):
""" Mark that a task has erred on a particular worker """
logger.debug("Stimulus task erred %s, %s", key, worker)
ts = self.tasks.get(key)
if ts is None:
return {}
if ts.state == "processing":
retries = ts.retries
if retries > 0:
ts.retries = retries - 1
recommendations = self.transition(key, "waiting")
else:
recommendations = self.transition(
key,
"erred",
cause=key,
exception=exception,
traceback=traceback,
worker=worker,
**kwargs,
)
else:
recommendations = {}
return recommendations
def stimulus_missing_data(
self, cause=None, key=None, worker=None, ensure=True, **kwargs
):
""" Mark that certain keys have gone missing. Recover. """
with log_errors():
logger.debug("Stimulus missing data %s, %s", key, worker)
ts = self.tasks.get(key)
if ts is None or ts.state == "memory":
return {}
cts = self.tasks.get(cause)
recommendations = {}
if cts is not None and cts.state == "memory": # couldn't find this
for ws in cts.who_has: # TODO: this behavior is extreme
ws.has_what.remove(cts)
ws.nbytes -= cts.get_nbytes()
cts.who_has.clear()
recommendations[cause] = "released"
if key:
recommendations[key] = "released"
self.transitions(recommendations)
if self.validate:
assert cause not in self.who_has
return {}
def stimulus_retry(self, comm=None, keys=None, client=None):
logger.info("Client %s requests to retry %d keys", client, len(keys))
if client:
self.log_event(client, {"action": "retry", "count": len(keys)})
stack = list(keys)
seen = set()
roots = []
while stack:
key = stack.pop()
seen.add(key)
erred_deps = [
dts.key for dts in self.tasks[key].dependencies if dts.state == "erred"
]
if erred_deps:
stack.extend(erred_deps)
else:
roots.append(key)
recommendations = {key: "waiting" for key in roots}
self.transitions(recommendations)
if self.validate:
for key in seen:
assert not self.tasks[key].exception_blame
return tuple(seen)
async def remove_worker(self, comm=None, address=None, safe=False, close=True):
"""
Remove worker from cluster
We do this when a worker reports that it plans to leave or when it
appears to be unresponsive. This may send its tasks back to a released
state.
"""
with log_errors():
if self.status == Status.closed:
return
address = self.coerce_address(address)
if address not in self.workers:
return "already-removed"
host = get_address_host(address)
ws = self.workers[address]
self.log_event(
["all", address],
{
"action": "remove-worker",
"worker": address,
"processing-tasks": dict(ws.processing),
},
)
logger.info("Remove worker %s", ws)
if close:
with suppress(AttributeError, CommClosedError):
self.stream_comms[address].send({"op": "close", "report": False})
self.remove_resources(address)
self.host_info[host]["nthreads"] -= ws.nthreads
self.host_info[host]["addresses"].remove(address)
self.total_nthreads -= ws.nthreads
if not self.host_info[host]["addresses"]:
del self.host_info[host]
self.rpc.remove(address)
del self.stream_comms[address]
del self.aliases[ws.name]
self.idle.discard(ws)
self.saturated.discard(ws)
del self.workers[address]
ws.status = "closed"
self.total_occupancy -= ws.occupancy
recommendations = {}
for ts in list(ws.processing):
k = ts.key
recommendations[k] = "released"
if not safe:
ts.suspicious += 1
ts.prefix.suspicious += 1
if ts.suspicious > self.allowed_failures:
del recommendations[k]
e = pickle.dumps(
KilledWorker(task=k, last_worker=ws.clean()), -1
)
r = self.transition(k, "erred", exception=e, cause=k)
recommendations.update(r)
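                            # Illustration: with the default allowed-failures of 3, a key
                            # whose workers have now died four times is marked "erred" with
                            # a KilledWorker exception instead of being rescheduled again.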
for ts in ws.has_what:
ts.who_has.remove(ws)
if not ts.who_has:
if ts.run_spec:
recommendations[ts.key] = "released"
else: # pure data
recommendations[ts.key] = "forgotten"
ws.has_what.clear()
self.transitions(recommendations)
for plugin in self.plugins[:]:
try:
result = plugin.remove_worker(scheduler=self, worker=address)
if inspect.isawaitable(result):
await result
except Exception as e:
logger.exception(e)
if not self.workers:
logger.info("Lost all workers")
for w in self.workers:
self.bandwidth_workers.pop((address, w), None)
self.bandwidth_workers.pop((w, address), None)
def remove_worker_from_events():
# If the worker isn't registered anymore after the delay, remove from events
if address not in self.workers and address in self.events:
del self.events[address]
cleanup_delay = parse_timedelta(
dask.config.get("distributed.scheduler.events-cleanup-delay")
)
self.loop.call_later(cleanup_delay, remove_worker_from_events)
logger.debug("Removed worker %s", ws)
return "OK"
def stimulus_cancel(self, comm, keys=None, client=None, force=False):
""" Stop execution on a list of keys """
logger.info("Client %s requests to cancel %d keys", client, len(keys))
if client:
self.log_event(
client, {"action": "cancel", "count": len(keys), "force": force}
)
for key in keys:
self.cancel_key(key, client, force=force)
def cancel_key(self, key, client, retries=5, force=False):
""" Cancel a particular key and all dependents """
# TODO: this should be converted to use the transition mechanism
ts = self.tasks.get(key)
try:
cs = self.clients[client]
except KeyError:
return
if ts is None or not ts.who_wants: # no key yet, lets try again in a moment
if retries:
self.loop.call_later(
0.2, lambda: self.cancel_key(key, client, retries - 1)
)
return
if force or ts.who_wants == {cs}: # no one else wants this key
for dts in list(ts.dependents):
self.cancel_key(dts.key, client, force=force)
logger.info("Scheduler cancels key %s. Force=%s", key, force)
self.report({"op": "cancelled-key", "key": key})
clients = list(ts.who_wants) if force else [cs]
for c in clients:
self.client_releases_keys(keys=[key], client=c.client_key)
def client_desires_keys(self, keys=None, client=None):
cs = self.clients.get(client)
if cs is None:
# For publish, queues etc.
cs = self.clients[client] = ClientState(client)
for k in keys:
ts = self.tasks.get(k)
if ts is None:
# For publish, queues etc.
ts = self.new_task(k, None, "released")
ts.who_wants.add(cs)
cs.wants_what.add(ts)
if ts.state in ("memory", "erred"):
self.report_on_key(k, client=client)
def client_releases_keys(self, keys=None, client=None):
""" Remove keys from client desired list """
logger.debug("Client %s releases keys: %s", client, keys)
cs = self.clients[client]
tasks2 = set()
for key in list(keys):
ts = self.tasks.get(key)
if ts is not None and ts in cs.wants_what:
cs.wants_what.remove(ts)
s = ts.who_wants
s.remove(cs)
if not s:
tasks2.add(ts)
recommendations = {}
for ts in tasks2:
if not ts.dependents:
# No live dependents, can forget
recommendations[ts.key] = "forgotten"
elif ts.state != "erred" and not ts.waiters:
recommendations[ts.key] = "released"
self.transitions(recommendations)
def client_heartbeat(self, client=None):
""" Handle heartbeats from Client """
self.clients[client].last_seen = time()
###################
# Task Validation #
###################
def validate_released(self, key):
ts = self.tasks[key]
assert ts.state == "released"
assert not ts.waiters
assert not ts.waiting_on
assert not ts.who_has
assert not ts.processing_on
assert not any(ts in dts.waiters for dts in ts.dependencies)
assert ts not in self.unrunnable
def validate_waiting(self, key):
ts = self.tasks[key]
assert ts.waiting_on
assert not ts.who_has
assert not ts.processing_on
assert ts not in self.unrunnable
for dts in ts.dependencies:
# We are waiting on a dependency iff it's not stored
assert bool(dts.who_has) + (dts in ts.waiting_on) == 1
assert ts in dts.waiters # XXX even if dts.who_has?
def validate_processing(self, key):
ts = self.tasks[key]
assert not ts.waiting_on
ws = ts.processing_on
assert ws
assert ts in ws.processing
assert not ts.who_has
for dts in ts.dependencies:
assert dts.who_has
assert ts in dts.waiters
def validate_memory(self, key):
ts = self.tasks[key]
assert ts.who_has
assert not ts.processing_on
assert not ts.waiting_on
assert ts not in self.unrunnable
for dts in ts.dependents:
assert (dts in ts.waiters) == (dts.state in ("waiting", "processing"))
assert ts not in dts.waiting_on
def validate_no_worker(self, key):
ts = self.tasks[key]
assert ts in self.unrunnable
        assert not ts.waiting_on
assert not ts.processing_on
assert not ts.who_has
for dts in ts.dependencies:
assert dts.who_has
def validate_erred(self, key):
ts = self.tasks[key]
assert ts.exception_blame
assert not ts.who_has
def validate_key(self, key, ts=None):
try:
if ts is None:
ts = self.tasks.get(key)
if ts is None:
logger.debug("Key lost: %s", key)
else:
ts.validate()
try:
func = getattr(self, "validate_" + ts.state.replace("-", "_"))
except AttributeError:
logger.error(
"self.validate_%s not found", ts.state.replace("-", "_")
)
else:
func(key)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def validate_state(self, allow_overlap=False):
validate_state(self.tasks, self.workers, self.clients)
        if set(self.workers) != set(self.stream_comms):
raise ValueError("Workers not the same in all collections")
for w, ws in self.workers.items():
assert isinstance(w, str), (type(w), w)
assert isinstance(ws, WorkerState), (type(ws), ws)
assert ws.address == w
if not ws.processing:
assert not ws.occupancy
assert ws in self.idle
for k, ts in self.tasks.items():
assert isinstance(ts, TaskState), (type(ts), ts)
assert ts.key == k
self.validate_key(k, ts)
for c, cs in self.clients.items():
# client=None is often used in tests...
assert c is None or isinstance(c, str), (type(c), c)
assert isinstance(cs, ClientState), (type(cs), cs)
assert cs.client_key == c
a = {w: ws.nbytes for w, ws in self.workers.items()}
b = {
w: sum(ts.get_nbytes() for ts in ws.has_what)
for w, ws in self.workers.items()
}
assert a == b, (a, b)
actual_total_occupancy = 0
for worker, ws in self.workers.items():
assert abs(sum(ws.processing.values()) - ws.occupancy) < 1e-8
actual_total_occupancy += ws.occupancy
assert abs(actual_total_occupancy - self.total_occupancy) < 1e-8, (
actual_total_occupancy,
self.total_occupancy,
)
###################
# Manage Messages #
###################
def report(self, msg, ts=None, client=None):
"""
Publish updates to all listening Queues and Comms
If the message contains a key then we only send the message to those
comms that care about the key.
"""
comms = set()
if client is not None:
try:
comms.add(self.client_comms[client])
except KeyError:
pass
if ts is None and "key" in msg:
ts = self.tasks.get(msg["key"])
if ts is None:
# Notify all clients
comms |= set(self.client_comms.values())
else:
# Notify clients interested in key
comms |= {
self.client_comms[c.client_key]
for c in ts.who_wants
if c.client_key in self.client_comms
}
for c in comms:
try:
c.send(msg)
# logger.debug("Scheduler sends message to client %s", msg)
except CommClosedError:
if self.status == Status.running:
logger.critical("Tried writing to closed comm: %s", msg)
async def add_client(self, comm, client=None, versions=None):
""" Add client to network
We listen to all future messages from this Comm.
"""
assert client is not None
comm.name = "Scheduler->Client"
logger.info("Receive client connection: %s", client)
self.log_event(["all", client], {"action": "add-client", "client": client})
self.clients[client] = ClientState(client, versions=versions)
for plugin in self.plugins[:]:
try:
plugin.add_client(scheduler=self, client=client)
except Exception as e:
logger.exception(e)
try:
bcomm = BatchedSend(interval="2ms", loop=self.loop)
bcomm.start(comm)
self.client_comms[client] = bcomm
msg = {"op": "stream-start"}
version_warning = version_module.error_message(
version_module.get_versions(),
{w: ws.versions for w, ws in self.workers.items()},
versions,
)
if version_warning:
msg["warning"] = version_warning
bcomm.send(msg)
try:
await self.handle_stream(comm=comm, extra={"client": client})
finally:
self.remove_client(client=client)
logger.debug("Finished handling client %s", client)
finally:
if not comm.closed():
self.client_comms[client].send({"op": "stream-closed"})
try:
if not shutting_down():
await self.client_comms[client].close()
del self.client_comms[client]
if self.status == Status.running:
logger.info("Close client connection: %s", client)
except TypeError: # comm becomes None during GC
pass
def remove_client(self, client=None):
""" Remove client from network """
if self.status == Status.running:
logger.info("Remove client %s", client)
self.log_event(["all", client], {"action": "remove-client", "client": client})
try:
cs = self.clients[client]
except KeyError:
# XXX is this a legitimate condition?
pass
else:
self.client_releases_keys(
keys=[ts.key for ts in cs.wants_what], client=cs.client_key
)
del self.clients[client]
for plugin in self.plugins[:]:
try:
plugin.remove_client(scheduler=self, client=client)
except Exception as e:
logger.exception(e)
def remove_client_from_events():
# If the client isn't registered anymore after the delay, remove from events
if client not in self.clients and client in self.events:
del self.events[client]
cleanup_delay = parse_timedelta(
dask.config.get("distributed.scheduler.events-cleanup-delay")
)
self.loop.call_later(cleanup_delay, remove_client_from_events)
def send_task_to_worker(self, worker, key):
""" Send a single computational task to a worker """
try:
ts = self.tasks[key]
msg = {
"op": "compute-task",
"key": key,
"priority": ts.priority,
"duration": self.get_task_duration(ts),
}
if ts.resource_restrictions:
msg["resource_restrictions"] = ts.resource_restrictions
if ts.actor:
msg["actor"] = True
deps = ts.dependencies
if deps:
msg["who_has"] = {
dep.key: [ws.address for ws in dep.who_has] for dep in deps
}
msg["nbytes"] = {dep.key: dep.nbytes for dep in deps}
if self.validate and deps:
assert all(msg["who_has"].values())
task = ts.run_spec
if type(task) is dict:
msg.update(task)
else:
msg["task"] = task
self.worker_send(worker, msg)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def handle_uncaught_error(self, **msg):
logger.exception(clean_exception(**msg)[1])
def handle_task_finished(self, key=None, worker=None, **msg):
if worker not in self.workers:
return
validate_key(key)
r = self.stimulus_task_finished(key=key, worker=worker, **msg)
self.transitions(r)
def handle_task_erred(self, key=None, **msg):
r = self.stimulus_task_erred(key=key, **msg)
self.transitions(r)
def handle_release_data(self, key=None, worker=None, client=None, **msg):
ts = self.tasks.get(key)
if ts is None:
return
ws = self.workers[worker]
if ts.processing_on != ws:
return
r = self.stimulus_missing_data(key=key, ensure=False, **msg)
self.transitions(r)
def handle_missing_data(self, key=None, errant_worker=None, **kwargs):
logger.debug("handle missing data key=%s worker=%s", key, errant_worker)
self.log.append(("missing", key, errant_worker))
ts = self.tasks.get(key)
if ts is None or not ts.who_has:
return
if errant_worker in self.workers:
ws = self.workers[errant_worker]
if ws in ts.who_has:
ts.who_has.remove(ws)
ws.has_what.remove(ts)
ws.nbytes -= ts.get_nbytes()
if not ts.who_has:
if ts.run_spec:
self.transitions({key: "released"})
else:
self.transitions({key: "forgotten"})
def release_worker_data(self, comm=None, keys=None, worker=None):
ws = self.workers[worker]
tasks = {self.tasks[k] for k in keys}
removed_tasks = tasks & ws.has_what
ws.has_what -= removed_tasks
recommendations = {}
for ts in removed_tasks:
ws.nbytes -= ts.get_nbytes()
wh = ts.who_has
wh.remove(ws)
if not wh:
recommendations[ts.key] = "released"
if recommendations:
self.transitions(recommendations)
def handle_long_running(self, key=None, worker=None, compute_duration=None):
""" A task has seceded from the thread pool
We stop the task from being stolen in the future, and change task
duration accounting as if the task has stopped.
"""
ts = self.tasks[key]
if "stealing" in self.extensions:
self.extensions["stealing"].remove_key_from_stealable(ts)
ws = ts.processing_on
if ws is None:
logger.debug("Received long-running signal from duplicate task. Ignoring.")
return
if compute_duration:
old_duration = ts.prefix.duration_average or 0
new_duration = compute_duration
if not old_duration:
avg_duration = new_duration
else:
avg_duration = 0.5 * old_duration + 0.5 * new_duration
ts.prefix.duration_average = avg_duration
ws.occupancy -= ws.processing[ts]
self.total_occupancy -= ws.processing[ts]
ws.processing[ts] = 0
self.check_idle_saturated(ws)
async def handle_worker(self, comm=None, worker=None):
"""
Listen to responses from a single worker
This is the main loop for scheduler-worker interaction
See Also
--------
Scheduler.handle_client: Equivalent coroutine for clients
"""
comm.name = "Scheduler connection to worker"
worker_comm = self.stream_comms[worker]
worker_comm.start(comm)
logger.info("Starting worker compute stream, %s", worker)
try:
await self.handle_stream(comm=comm, extra={"worker": worker})
finally:
if worker in self.stream_comms:
worker_comm.abort()
await self.remove_worker(address=worker)
def add_plugin(self, plugin=None, idempotent=False, **kwargs):
"""
Add external plugin to scheduler
See https://distributed.readthedocs.io/en/latest/plugins.html
"""
if isinstance(plugin, type):
plugin = plugin(self, **kwargs)
if idempotent and any(isinstance(p, type(plugin)) for p in self.plugins):
return
self.plugins.append(plugin)
def remove_plugin(self, plugin):
""" Remove external plugin from scheduler """
self.plugins.remove(plugin)
def worker_send(self, worker, msg):
""" Send message to worker
This also handles connection failures by adding a callback to remove
the worker on the next cycle.
"""
try:
self.stream_comms[worker].send(msg)
except (CommClosedError, AttributeError):
self.loop.add_callback(self.remove_worker, address=worker)
############################
# Less common interactions #
############################
async def scatter(
self,
comm=None,
data=None,
workers=None,
client=None,
broadcast=False,
timeout=2,
):
""" Send data out to workers
See also
--------
Scheduler.broadcast:
"""
start = time()
while not self.workers:
await asyncio.sleep(0.2)
if time() > start + timeout:
raise TimeoutError("No workers found")
if workers is None:
nthreads = {w: ws.nthreads for w, ws in self.workers.items()}
else:
workers = [self.coerce_address(w) for w in workers]
nthreads = {w: self.workers[w].nthreads for w in workers}
assert isinstance(data, dict)
keys, who_has, nbytes = await scatter_to_workers(
nthreads, data, rpc=self.rpc, report=False
)
self.update_data(who_has=who_has, nbytes=nbytes, client=client)
if broadcast:
if broadcast == True: # noqa: E712
n = len(nthreads)
else:
n = broadcast
await self.replicate(keys=keys, workers=workers, n=n)
self.log_event(
[client, "all"], {"action": "scatter", "client": client, "count": len(data)}
)
return keys
async def gather(self, comm=None, keys=None, serializers=None):
""" Collect data in from workers """
keys = list(keys)
who_has = {}
for key in keys:
ts = self.tasks.get(key)
if ts is not None:
who_has[key] = [ws.address for ws in ts.who_has]
else:
who_has[key] = []
data, missing_keys, missing_workers = await gather_from_workers(
who_has, rpc=self.rpc, close=False, serializers=serializers
)
if not missing_keys:
result = {"status": "OK", "data": data}
else:
missing_states = [
(self.tasks[key].state if key in self.tasks else None)
for key in missing_keys
]
logger.exception(
"Couldn't gather keys %s state: %s workers: %s",
missing_keys,
missing_states,
missing_workers,
)
result = {"status": "error", "keys": missing_keys}
with log_errors():
# Remove suspicious workers from the scheduler but allow them to
# reconnect.
await asyncio.gather(
*[
self.remove_worker(address=worker, close=False)
for worker in missing_workers
]
)
for key, workers in missing_keys.items():
# Task may already be gone if it was held by a
# `missing_worker`
ts = self.tasks.get(key)
logger.exception(
"Workers don't have promised key: %s, %s",
str(workers),
str(key),
)
if not workers or ts is None:
continue
for worker in workers:
ws = self.workers.get(worker)
if ws is not None and ts in ws.has_what:
ws.has_what.remove(ts)
ts.who_has.remove(ws)
ws.nbytes -= ts.get_nbytes()
self.transitions({key: "released"})
self.log_event("all", {"action": "gather", "count": len(keys)})
return result
def clear_task_state(self):
# XXX what about nested state such as ClientState.wants_what
# (see also fire-and-forget...)
logger.info("Clear task state")
for collection in self._task_state_collections:
collection.clear()
async def restart(self, client=None, timeout=3):
""" Restart all workers. Reset local state. """
with log_errors():
n_workers = len(self.workers)
logger.info("Send lost future signal to clients")
for cs in self.clients.values():
self.client_releases_keys(
keys=[ts.key for ts in cs.wants_what], client=cs.client_key
)
nannies = {addr: ws.nanny for addr, ws in self.workers.items()}
for addr in list(self.workers):
try:
# Ask the worker to close if it doesn't have a nanny,
# otherwise the nanny will kill it anyway
await self.remove_worker(address=addr, close=addr not in nannies)
except Exception as e:
logger.info(
"Exception while restarting. This is normal", exc_info=True
)
self.clear_task_state()
for plugin in self.plugins[:]:
try:
plugin.restart(self)
except Exception as e:
logger.exception(e)
logger.debug("Send kill signal to nannies: %s", nannies)
nannies = [
rpc(nanny_address, connection_args=self.connection_args)
for nanny_address in nannies.values()
if nanny_address is not None
]
resps = All(
[
nanny.restart(
close=True, timeout=timeout * 0.8, executor_wait=False
)
for nanny in nannies
]
)
try:
resps = await asyncio.wait_for(resps, timeout)
except TimeoutError:
logger.error(
"Nannies didn't report back restarted within "
"timeout. Continuuing with restart process"
)
else:
if not all(resp == "OK" for resp in resps):
logger.error(
"Not all workers responded positively: %s", resps, exc_info=True
)
finally:
await asyncio.gather(*[nanny.close_rpc() for nanny in nannies])
self.clear_task_state()
with suppress(AttributeError):
for c in self._worker_coroutines:
c.cancel()
self.log_event([client, "all"], {"action": "restart", "client": client})
start = time()
while time() < start + 10 and len(self.workers) < n_workers:
await asyncio.sleep(0.01)
self.report({"op": "restart"})
async def broadcast(
self,
comm=None,
msg=None,
workers=None,
hosts=None,
nanny=False,
serializers=None,
):
""" Broadcast message to workers, return all results """
if workers is None or workers is True:
if hosts is None:
workers = list(self.workers)
else:
workers = []
if hosts is not None:
for host in hosts:
if host in self.host_info:
workers.extend(self.host_info[host]["addresses"])
# TODO replace with worker_list
if nanny:
addresses = [self.workers[w].nanny for w in workers]
else:
addresses = workers
async def send_message(addr):
comm = await self.rpc.connect(addr)
comm.name = "Scheduler Broadcast"
try:
resp = await send_recv(comm, close=True, serializers=serializers, **msg)
finally:
self.rpc.reuse(addr, comm)
return resp
results = await All(
[send_message(address) for address in addresses if address is not None]
)
return dict(zip(workers, results))
async def proxy(self, comm=None, msg=None, worker=None, serializers=None):
""" Proxy a communication through the scheduler to some other worker """
d = await self.broadcast(
comm=comm, msg=msg, workers=[worker], serializers=serializers
)
return d[worker]
async def _delete_worker_data(self, worker_address, keys):
""" Delete data from a worker and update the corresponding worker/task states
Parameters
----------
worker_address: str
Worker address to delete keys from
keys: List[str]
List of keys to delete on the specified worker
"""
await retry_operation(
self.rpc(addr=worker_address).delete_data, keys=list(keys), report=False
)
ws = self.workers[worker_address]
tasks = {self.tasks[key] for key in keys}
ws.has_what -= tasks
for ts in tasks:
ts.who_has.remove(ws)
ws.nbytes -= ts.get_nbytes()
self.log_event(ws.address, {"action": "remove-worker-data", "keys": keys})
async def rebalance(self, comm=None, keys=None, workers=None):
""" Rebalance keys so that each worker stores roughly equal bytes
**Policy**
This orders the workers by what fraction of bytes of the existing keys
they have. It walks down this list from most-to-least. At each worker
it takes the largest results it can find and sends them to the least
occupied worker until either the sender or the recipient is at the
average expected load.
"""
with log_errors():
async with self._lock:
if keys:
tasks = {self.tasks[k] for k in keys}
missing_data = [ts.key for ts in tasks if not ts.who_has]
if missing_data:
return {"status": "missing-data", "keys": missing_data}
else:
tasks = set(self.tasks.values())
if workers:
workers = {self.workers[w] for w in workers}
workers_by_task = {ts: ts.who_has & workers for ts in tasks}
else:
workers = set(self.workers.values())
workers_by_task = {ts: ts.who_has for ts in tasks}
tasks_by_worker = {ws: set() for ws in workers}
for k, v in workers_by_task.items():
for vv in v:
tasks_by_worker[vv].add(k)
worker_bytes = {
ws: sum(ts.get_nbytes() for ts in v)
for ws, v in tasks_by_worker.items()
}
avg = sum(worker_bytes.values()) / len(worker_bytes)
sorted_workers = list(
map(first, sorted(worker_bytes.items(), key=second, reverse=True))
)
recipients = iter(reversed(sorted_workers))
recipient = next(recipients)
msgs = [] # (sender, recipient, key)
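# Walk the most-loaded half of the workers as senders.  For each sender,
# hand its largest keys to the current (least-loaded) recipient until the
# recipient reaches the average or the sender drops below it, then move on
# to the next recipient.  For illustration only: with three workers
# holding 6/3/0 bytes (average 3), the single sender ships its largest
# keys to the empty worker until both sit near 3 bytes.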
for sender in sorted_workers[: len(workers) // 2]:
sender_keys = {
ts: ts.get_nbytes() for ts in tasks_by_worker[sender]
}
sender_keys = iter(
sorted(sender_keys.items(), key=second, reverse=True)
)
try:
while worker_bytes[sender] > avg:
while (
worker_bytes[recipient] < avg
and worker_bytes[sender] > avg
):
ts, nb = next(sender_keys)
if ts not in tasks_by_worker[recipient]:
tasks_by_worker[recipient].add(ts)
# tasks_by_worker[sender].remove(ts)
msgs.append((sender, recipient, ts))
worker_bytes[sender] -= nb
worker_bytes[recipient] += nb
if worker_bytes[sender] > avg:
recipient = next(recipients)
except StopIteration:
break
to_recipients = defaultdict(lambda: defaultdict(list))
to_senders = defaultdict(list)
for sender, recipient, ts in msgs:
to_recipients[recipient.address][ts.key].append(sender.address)
to_senders[sender.address].append(ts.key)
result = await asyncio.gather(
*(
retry_operation(self.rpc(addr=r).gather, who_has=v)
for r, v in to_recipients.items()
)
)
for r, v in to_recipients.items():
self.log_event(r, {"action": "rebalance", "who_has": v})
self.log_event(
"all",
{
"action": "rebalance",
"total-keys": len(tasks),
"senders": valmap(len, to_senders),
"recipients": valmap(len, to_recipients),
"moved_keys": len(msgs),
},
)
if not all(r["status"] == "OK" for r in result):
return {
"status": "missing-data",
"keys": tuple(
concat(
r["keys"].keys()
for r in result
if r["status"] == "missing-data"
)
),
}
for sender, recipient, ts in msgs:
assert ts.state == "memory"
ts.who_has.add(recipient)
recipient.has_what.add(ts)
recipient.nbytes += ts.get_nbytes()
self.log.append(
("rebalance", ts.key, time(), sender.address, recipient.address)
)
await asyncio.gather(
*(self._delete_worker_data(r, v) for r, v in to_senders.items())
)
return {"status": "OK"}
async def replicate(
self,
comm=None,
keys=None,
n=None,
workers=None,
branching_factor=2,
delete=True,
lock=True,
):
""" Replicate data throughout cluster
This performs a tree copy of the data throughout the network
individually on each piece of data.
Parameters
----------
keys: Iterable
list of keys to replicate
n: int
Number of replications we expect to see within the cluster
branching_factor: int, optional
The number of workers that can copy data in each generation.
The larger the branching factor, the more data we copy in
a single step, but the more a given worker risks being
swamped by data requests.
See also
--------
Scheduler.rebalance
"""
assert branching_factor > 0
async with self._lock if lock else empty_context:
workers = {self.workers[w] for w in self.workers_list(workers)}
if n is None:
n = len(workers)
else:
n = min(n, len(workers))
if n == 0:
raise ValueError("Can not use replicate to delete data")
tasks = {self.tasks[k] for k in keys}
missing_data = [ts.key for ts in tasks if not ts.who_has]
if missing_data:
return {"status": "missing-data", "keys": missing_data}
# Delete extraneous data
if delete:
del_worker_tasks = defaultdict(set)
for ts in tasks:
del_candidates = ts.who_has & workers
if len(del_candidates) > n:
for ws in random.sample(
del_candidates, len(del_candidates) - n
):
del_worker_tasks[ws].add(ts)
await asyncio.gather(
*(
self._delete_worker_data(ws.address, [t.key for t in tasks])
for ws, tasks in del_worker_tasks.items()
)
)
# Copy not-yet-filled data
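# Each pass copies every under-replicated key to at most
# branching_factor * len(current holders) additional workers, so replica
# counts can grow geometrically, e.g. 1 -> 3 -> 9 for branching_factor=2.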
while tasks:
gathers = defaultdict(dict)
for ts in list(tasks):
n_missing = n - len(ts.who_has & workers)
if n_missing <= 0:
# Already replicated enough
tasks.remove(ts)
continue
count = min(n_missing, branching_factor * len(ts.who_has))
assert count > 0
for ws in random.sample(workers - ts.who_has, count):
gathers[ws.address][ts.key] = [
wws.address for wws in ts.who_has
]
results = await asyncio.gather(
*(
retry_operation(self.rpc(addr=w).gather, who_has=who_has)
for w, who_has in gathers.items()
)
)
for w, v in zip(gathers, results):
if v["status"] == "OK":
self.add_keys(worker=w, keys=list(gathers[w]))
else:
logger.warning("Communication failed during replication: %s", v)
self.log_event(w, {"action": "replicate-add", "keys": gathers[w]})
self.log_event(
"all",
{
"action": "replicate",
"workers": list(workers),
"key-count": len(keys),
"branching-factor": branching_factor,
},
)
def workers_to_close(
self,
comm=None,
memory_ratio=None,
n=None,
key=None,
minimum=None,
target=None,
attribute="address",
):
"""
Find workers that we can close with low cost
This returns a list of workers that are good candidates to retire.
These workers are not running anything and are storing
relatively little data compared to their peers. If all workers are
idle then we still maintain enough workers to have enough RAM to store
our data, with a comfortable buffer.
This is for use with systems like ``distributed.deploy.adaptive``.
Parameters
----------
memory_ratio: Number
Amount of extra space we want to have for our stored data.
Defaults to 2, i.e. we want to have twice as much memory as we
currently have data.
n: int
Number of workers to close
minimum: int
Minimum number of workers to keep around
key: Callable(WorkerState)
An optional callable mapping a WorkerState object to a group
affiliation. Groups will be closed together. This is useful when
closing workers must be done collectively, such as by hostname.
target: int
Target number of workers to have after we close
attribute : str
The attribute of the WorkerState object to return, like "address"
or "name". Defaults to "address".
Examples
--------
>>> scheduler.workers_to_close()
['tcp://192.168.0.1:1234', 'tcp://192.168.0.2:1234']
Group workers by hostname prior to closing
>>> scheduler.workers_to_close(key=lambda ws: ws.host)
['tcp://192.168.0.1:1234', 'tcp://192.168.0.1:4567']
Remove two workers
>>> scheduler.workers_to_close(n=2)
Keep enough workers to have twice as much memory as we need.
>>> scheduler.workers_to_close(memory_ratio=2)
Returns
-------
to_close: list of worker addresses that are OK to close
See Also
--------
Scheduler.retire_workers
"""
if target is not None and n is None:
n = len(self.workers) - target
if n is not None:
if n < 0:
n = 0
target = len(self.workers) - n
if n is None and memory_ratio is None:
memory_ratio = 2
with log_errors():
if not n and all(ws.processing for ws in self.workers.values()):
return []
if key is None:
key = lambda ws: ws.address
if isinstance(key, bytes) and dask.config.get(
"distributed.scheduler.pickle"
):
key = pickle.loads(key)
groups = groupby(key, self.workers.values())
limit_bytes = {
k: sum(ws.memory_limit for ws in v) for k, v in groups.items()
}
group_bytes = {k: sum(ws.nbytes for ws in v) for k, v in groups.items()}
limit = sum(limit_bytes.values())
total = sum(group_bytes.values())
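# Sort the groups so that idle, low-memory groups end up at the tail of
# ``idle`` and get popped first.  Keep closing whole groups while the
# remaining workers still satisfy either the requested count (``target``)
# or the ``memory_ratio`` headroom over the bytes currently stored.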
def _key(group):
is_idle = not any(ws.processing for ws in groups[group])
bytes = -group_bytes[group]
return (is_idle, bytes)
idle = sorted(groups, key=_key)
to_close = []
n_remain = len(self.workers)
while idle:
group = idle.pop()
if n is None and any(ws.processing for ws in groups[group]):
break
if minimum and n_remain - len(groups[group]) < minimum:
break
limit -= limit_bytes[group]
if (n is not None and n_remain - len(groups[group]) >= target) or (
memory_ratio is not None and limit >= memory_ratio * total
):
to_close.append(group)
n_remain -= len(groups[group])
else:
break
result = [getattr(ws, attribute) for g in to_close for ws in groups[g]]
if result:
logger.debug("Suggest closing workers: %s", result)
return result
async def retire_workers(
self,
comm=None,
workers=None,
remove=True,
close_workers=False,
names=None,
lock=True,
**kwargs,
):
""" Gracefully retire workers from cluster
Parameters
----------
workers: list (optional)
List of worker addresses to retire.
If not provided we call ``workers_to_close`` which finds a good set.
names: list (optional)
List of worker names to retire.
remove: bool (defaults to True)
Whether or not to remove the worker metadata immediately or else
wait for the worker to contact us
close_workers: bool (defaults to False)
Whether or not to actually close the worker explicitly from here.
Otherwise we expect some external job scheduler to finish off the
worker.
**kwargs: dict
Extra options to pass to workers_to_close to determine which
workers we should drop
Returns
-------
Dictionary mapping worker ID/address to dictionary of information about
that worker for each retired worker.
See Also
--------
Scheduler.workers_to_close
"""
with log_errors():
async with self._lock if lock else empty_context:
if names is not None:
if names:
logger.info("Retire worker names %s", names)
names = set(map(str, names))
workers = [
ws.address
for ws in self.workers.values()
if str(ws.name) in names
]
if workers is None:
while True:
try:
workers = self.workers_to_close(**kwargs)
if workers:
workers = await self.retire_workers(
workers=workers,
remove=remove,
close_workers=close_workers,
lock=False,
)
return workers
except KeyError: # keys left during replicate
pass
workers = {self.workers[w] for w in workers if w in self.workers}
if not workers:
return []
logger.info("Retire workers %s", workers)
# Keys orphaned by retiring those workers
keys = set.union(*[w.has_what for w in workers])
keys = {ts.key for ts in keys if ts.who_has.issubset(workers)}
other_workers = set(self.workers.values()) - workers
if keys:
if other_workers:
logger.info("Moving %d keys to other workers", len(keys))
await self.replicate(
keys=keys,
workers=[ws.address for ws in other_workers],
n=1,
delete=False,
lock=False,
)
else:
return []
worker_keys = {ws.address: ws.identity() for ws in workers}
if close_workers and worker_keys:
await asyncio.gather(
*[self.close_worker(worker=w, safe=True) for w in worker_keys]
)
if remove:
await asyncio.gather(
*[self.remove_worker(address=w, safe=True) for w in worker_keys]
)
self.log_event(
"all",
{
"action": "retire-workers",
"workers": worker_keys,
"moved-keys": len(keys),
},
)
self.log_event(list(worker_keys), {"action": "retired"})
return worker_keys
def add_keys(self, comm=None, worker=None, keys=()):
"""
Learn that a worker has certain keys
This should not be used in practice and is mostly here for legacy
reasons. However, it is sent by workers from time to time.
"""
if worker not in self.workers:
return "not found"
ws = self.workers[worker]
for key in keys:
ts = self.tasks.get(key)
if ts is not None and ts.state == "memory":
if ts not in ws.has_what:
ws.nbytes += ts.get_nbytes()
ws.has_what.add(ts)
ts.who_has.add(ws)
else:
self.worker_send(
worker, {"op": "delete-data", "keys": [key], "report": False}
)
return "OK"
def update_data(
self, comm=None, who_has=None, nbytes=None, client=None, serializers=None
):
"""
Learn that new data has entered the network from an external source
See Also
--------
Scheduler.mark_key_in_memory
"""
with log_errors():
who_has = {
k: [self.coerce_address(vv) for vv in v] for k, v in who_has.items()
}
logger.debug("Update data %s", who_has)
for key, workers in who_has.items():
ts = self.tasks.get(key)
if ts is None:
ts = self.new_task(key, None, "memory")
ts.state = "memory"
if key in nbytes:
ts.set_nbytes(nbytes[key])
for w in workers:
ws = self.workers[w]
if ts not in ws.has_what:
ws.nbytes += ts.get_nbytes()
ws.has_what.add(ts)
ts.who_has.add(ws)
self.report(
{"op": "key-in-memory", "key": key, "workers": list(workers)}
)
if client:
self.client_desires_keys(keys=list(who_has), client=client)
def report_on_key(self, key=None, ts=None, client=None):
assert (key is None) + (ts is None) == 1, (key, ts)
if ts is None:
try:
ts = self.tasks[key]
except KeyError:
self.report({"op": "cancelled-key", "key": key}, client=client)
return
else:
key = ts.key
if ts.state == "forgotten":
self.report({"op": "cancelled-key", "key": key}, ts=ts, client=client)
elif ts.state == "memory":
self.report({"op": "key-in-memory", "key": key}, ts=ts, client=client)
elif ts.state == "erred":
failing_ts = ts.exception_blame
self.report(
{
"op": "task-erred",
"key": key,
"exception": failing_ts.exception,
"traceback": failing_ts.traceback,
},
ts=ts,
client=client,
)
async def feed(
self, comm, function=None, setup=None, teardown=None, interval="1s", **kwargs
):
"""
Provides a data Comm to external requester
Caution: this runs arbitrary Python code on the scheduler. This should
eventually be phased out. It is mostly used by diagnostics.
"""
if not dask.config.get("distributed.scheduler.pickle"):
logger.warning(
"Tried to call 'feed' route with custom functions, but "
"pickle is disallowed. Set the 'distributed.scheduler.pickle' "
"config value to True to use the 'feed' route (this is most "
"commonly used with progress bars)"
)
return
interval = parse_timedelta(interval)
with log_errors():
if function:
function = pickle.loads(function)
if setup:
setup = pickle.loads(setup)
if teardown:
teardown = pickle.loads(teardown)
state = setup(self) if setup else None
if inspect.isawaitable(state):
state = await state
try:
while self.status == Status.running:
if state is None:
response = function(self)
else:
response = function(self, state)
await comm.write(response)
await asyncio.sleep(interval)
except (EnvironmentError, CommClosedError):
pass
finally:
if teardown:
teardown(self, state)
def subscribe_worker_status(self, comm=None):
WorkerStatusPlugin(self, comm)
ident = self.identity()
for v in ident["workers"].values():
del v["metrics"]
del v["last_seen"]
return ident
def get_processing(self, comm=None, workers=None):
if workers is not None:
workers = set(map(self.coerce_address, workers))
return {w: [ts.key for ts in self.workers[w].processing] for w in workers}
else:
return {
w: [ts.key for ts in ws.processing] for w, ws in self.workers.items()
}
def get_who_has(self, comm=None, keys=None):
if keys is not None:
return {
k: [ws.address for ws in self.tasks[k].who_has]
if k in self.tasks
else []
for k in keys
}
else:
return {
key: [ws.address for ws in ts.who_has] for key, ts in self.tasks.items()
}
def get_has_what(self, comm=None, workers=None):
if workers is not None:
workers = map(self.coerce_address, workers)
return {
w: [ts.key for ts in self.workers[w].has_what]
if w in self.workers
else []
for w in workers
}
else:
return {w: [ts.key for ts in ws.has_what] for w, ws in self.workers.items()}
def get_ncores(self, comm=None, workers=None):
if workers is not None:
workers = map(self.coerce_address, workers)
return {w: self.workers[w].nthreads for w in workers if w in self.workers}
else:
return {w: ws.nthreads for w, ws in self.workers.items()}
async def get_call_stack(self, comm=None, keys=None):
if keys is not None:
stack = list(keys)
processing = set()
while stack:
key = stack.pop()
ts = self.tasks[key]
if ts.state == "waiting":
stack.extend(dts.key for dts in ts.dependencies)
elif ts.state == "processing":
processing.add(ts)
workers = defaultdict(list)
for ts in processing:
if ts.processing_on:
workers[ts.processing_on.address].append(ts.key)
else:
workers = {w: None for w in self.workers}
if not workers:
return {}
results = await asyncio.gather(
*(self.rpc(w).call_stack(keys=v) for w, v in workers.items())
)
response = {w: r for w, r in zip(workers, results) if r}
return response
def get_nbytes(self, comm=None, keys=None, summary=True):
with log_errors():
if keys is not None:
result = {k: self.tasks[k].nbytes for k in keys}
else:
result = {
k: ts.nbytes
for k, ts in self.tasks.items()
if ts.nbytes is not None
}
if summary:
out = defaultdict(lambda: 0)
for k, v in result.items():
out[key_split(k)] += v
result = dict(out)
return result
def get_comm_cost(self, ts, ws):
"""
Get the estimated communication cost (in s.) to compute the task
on the given worker.
"""
return sum(dts.nbytes for dts in ts.dependencies - ws.has_what) / self.bandwidth
def get_task_duration(self, ts, default=None):
"""
Get the estimated computation cost of the given task
(not including any communication cost).
"""
duration = ts.prefix.duration_average
if duration is None:
self.unknown_durations[ts.prefix.name].add(ts)
if default is None:
default = parse_timedelta(
dask.config.get("distributed.scheduler.unknown-task-duration")
)
return default
return duration
def run_function(self, stream, function, args=(), kwargs={}, wait=True):
""" Run a function within this process
See Also
--------
Client.run_on_scheduler:
"""
from .worker import run
self.log_event("all", {"action": "run-function", "function": function})
return run(self, stream, function=function, args=args, kwargs=kwargs, wait=wait)
def set_metadata(self, comm=None, keys=None, value=None):
try:
metadata = self.task_metadata
for key in keys[:-1]:
if key not in metadata or not isinstance(metadata[key], (dict, list)):
metadata[key] = dict()
metadata = metadata[key]
metadata[keys[-1]] = value
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def get_metadata(self, comm=None, keys=None, default=no_default):
metadata = self.task_metadata
for key in keys[:-1]:
metadata = metadata[key]
try:
return metadata[keys[-1]]
except KeyError:
if default != no_default:
return default
else:
raise
def get_task_status(self, comm=None, keys=None):
return {
key: (self.tasks[key].state if key in self.tasks else None) for key in keys
}
def get_task_stream(self, comm=None, start=None, stop=None, count=None):
from distributed.diagnostics.task_stream import TaskStreamPlugin
self.add_plugin(TaskStreamPlugin, idempotent=True)
ts = [p for p in self.plugins if isinstance(p, TaskStreamPlugin)][0]
return ts.collect(start=start, stop=stop, count=count)
async def register_worker_plugin(self, comm, plugin, name=None):
""" Registers a setup function, and call it on every worker """
self.worker_plugins.append(plugin)
responses = await self.broadcast(
msg=dict(op="plugin-add", plugin=plugin, name=name)
)
return responses
#####################
# State Transitions #
#####################
def _remove_from_processing(self, ts, send_worker_msg=None):
"""
Remove *ts* from the set of processing tasks.
"""
ws = ts.processing_on
ts.processing_on = None
w = ws.address
if w in self.workers: # may have been removed
duration = ws.processing.pop(ts)
if not ws.processing:
self.total_occupancy -= ws.occupancy
ws.occupancy = 0
else:
self.total_occupancy -= duration
ws.occupancy -= duration
self.check_idle_saturated(ws)
self.release_resources(ts, ws)
if send_worker_msg:
self.worker_send(w, send_worker_msg)
def _add_to_memory(
self, ts, ws, recommendations, type=None, typename=None, **kwargs
):
"""
Add *ts* to the set of in-memory tasks.
"""
if self.validate:
assert ts not in ws.has_what
ts.who_has.add(ws)
ws.has_what.add(ts)
ws.nbytes += ts.get_nbytes()
deps = ts.dependents
if len(deps) > 1:
deps = sorted(deps, key=operator.attrgetter("priority"), reverse=True)
for dts in deps:
s = dts.waiting_on
if ts in s:
s.discard(ts)
if not s: # new task ready to run
recommendations[dts.key] = "processing"
for dts in ts.dependencies:
s = dts.waiters
s.discard(ts)
if not s and not dts.who_wants:
recommendations[dts.key] = "released"
if not ts.waiters and not ts.who_wants:
recommendations[ts.key] = "released"
else:
msg = {"op": "key-in-memory", "key": ts.key}
if type is not None:
msg["type"] = type
self.report(msg)
ts.state = "memory"
ts.type = typename
ts.group.types.add(typename)
cs = self.clients["fire-and-forget"]
if ts in cs.wants_what:
self.client_releases_keys(client="fire-and-forget", keys=[ts.key])
def transition_released_waiting(self, key):
try:
ts = self.tasks[key]
if self.validate:
assert ts.run_spec
assert not ts.waiting_on
assert not ts.who_has
assert not ts.processing_on
assert not any(dts.state == "forgotten" for dts in ts.dependencies)
if ts.has_lost_dependencies:
return {key: "forgotten"}
ts.state = "waiting"
recommendations = {}
for dts in ts.dependencies:
if dts.exception_blame:
ts.exception_blame = dts.exception_blame
recommendations[key] = "erred"
return recommendations
for dts in ts.dependencies:
dep = dts.key
if not dts.who_has:
ts.waiting_on.add(dts)
if dts.state == "released":
recommendations[dep] = "waiting"
else:
dts.waiters.add(ts)
ts.waiters = {dts for dts in ts.dependents if dts.state == "waiting"}
if not ts.waiting_on:
if self.workers:
recommendations[key] = "processing"
else:
self.unrunnable.add(ts)
ts.state = "no-worker"
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_no_worker_waiting(self, key):
try:
ts = self.tasks[key]
if self.validate:
assert ts in self.unrunnable
assert not ts.waiting_on
assert not ts.who_has
assert not ts.processing_on
self.unrunnable.remove(ts)
if ts.has_lost_dependencies:
return {key: "forgotten"}
recommendations = {}
for dts in ts.dependencies:
dep = dts.key
if not dts.who_has:
ts.waiting_on.add(dts)
if dts.state == "released":
recommendations[dep] = "waiting"
else:
dts.waiters.add(ts)
ts.state = "waiting"
if not ts.waiting_on:
if self.workers:
recommendations[key] = "processing"
else:
self.unrunnable.add(ts)
ts.state = "no-worker"
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def decide_worker(self, ts):
"""
Decide on a worker for task *ts*. Return a WorkerState.
"""
valid_workers = self.valid_workers(ts)
if not valid_workers and not ts.loose_restrictions and self.workers:
self.unrunnable.add(ts)
ts.state = "no-worker"
return None
if ts.dependencies or valid_workers is not True:
worker = decide_worker(
ts,
self.workers.values(),
valid_workers,
partial(self.worker_objective, ts),
)
elif self.idle:
if len(self.idle) < 20: # smart but linear in small case
worker = min(self.idle, key=operator.attrgetter("occupancy"))
else: # dumb but fast in large case
worker = self.idle[self.n_tasks % len(self.idle)]
else:
if len(self.workers) < 20: # smart but linear in small case
worker = min(
self.workers.values(), key=operator.attrgetter("occupancy")
)
else: # dumb but fast in large case
worker = self.workers.values()[self.n_tasks % len(self.workers)]
if self.validate:
assert worker is None or isinstance(worker, WorkerState), (
type(worker),
worker,
)
assert worker.address in self.workers
return worker
def transition_waiting_processing(self, key):
try:
ts = self.tasks[key]
if self.validate:
assert not ts.waiting_on
assert not ts.who_has
assert not ts.exception_blame
assert not ts.processing_on
assert not ts.has_lost_dependencies
assert ts not in self.unrunnable
assert all(dts.who_has for dts in ts.dependencies)
ws = self.decide_worker(ts)
if ws is None:
return {}
worker = ws.address
duration = self.get_task_duration(ts)
comm = self.get_comm_cost(ts, ws)
ws.processing[ts] = duration + comm
ts.processing_on = ws
ws.occupancy += duration + comm
self.total_occupancy += duration + comm
ts.state = "processing"
self.consume_resources(ts, ws)
self.check_idle_saturated(ws)
self.n_tasks += 1
if ts.actor:
ws.actors.add(ts)
# logger.debug("Send job to worker: %s, %s", worker, key)
self.send_task_to_worker(worker, key)
return {}
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_waiting_memory(self, key, nbytes=None, worker=None, **kwargs):
try:
ws = self.workers[worker]
ts = self.tasks[key]
if self.validate:
assert not ts.processing_on
assert ts.waiting_on
assert ts.state == "waiting"
ts.waiting_on.clear()
if nbytes is not None:
ts.set_nbytes(nbytes)
self.check_idle_saturated(ws)
recommendations = {}
self._add_to_memory(ts, ws, recommendations, **kwargs)
if self.validate:
assert not ts.processing_on
assert not ts.waiting_on
assert ts.who_has
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_processing_memory(
self,
key,
nbytes=None,
type=None,
typename=None,
worker=None,
startstops=None,
**kwargs,
):
try:
ts = self.tasks[key]
assert worker
assert isinstance(worker, str)
if self.validate:
assert ts.processing_on
ws = ts.processing_on
assert ts in ws.processing
assert not ts.waiting_on
assert not ts.who_has, (ts, ts.who_has)
assert not ts.exception_blame
assert ts.state == "processing"
ws = self.workers.get(worker)
if ws is None:
return {key: "released"}
if ws != ts.processing_on: # someone else has this task
logger.info(
"Unexpected worker completed task, likely due to"
" work stealing. Expected: %s, Got: %s, Key: %s",
ts.processing_on,
ws,
key,
)
return {}
if startstops:
L = list()
for startstop in startstops:
stop = startstop["stop"]
start = startstop["start"]
action = startstop["action"]
if action == "compute":
L.append((start, stop))
# record timings of all actions -- a cheaper way of
# getting timing info compared with get_task_stream()
ts.prefix.all_durations[action] += stop - start
if len(L) > 0:
compute_start, compute_stop = L[0]
else: # This is very rare
compute_start = compute_stop = None
else:
compute_start = compute_stop = None
#############################
# Update Timing Information #
#############################
if compute_start and ws.processing.get(ts, True):
# Update average task duration for worker
old_duration = ts.prefix.duration_average or 0
new_duration = compute_stop - compute_start
if not old_duration:
avg_duration = new_duration
else:
avg_duration = 0.5 * old_duration + 0.5 * new_duration
ts.prefix.duration_average = avg_duration
ts.group.duration += new_duration
for tts in self.unknown_durations.pop(ts.prefix.name, ()):
if tts.processing_on:
wws = tts.processing_on
old = wws.processing[tts]
comm = self.get_comm_cost(tts, wws)
wws.processing[tts] = avg_duration + comm
wws.occupancy += avg_duration + comm - old
self.total_occupancy += avg_duration + comm - old
############################
# Update State Information #
############################
if nbytes is not None:
ts.set_nbytes(nbytes)
recommendations = {}
self._remove_from_processing(ts)
self._add_to_memory(ts, ws, recommendations, type=type, typename=typename)
if self.validate:
assert not ts.processing_on
assert not ts.waiting_on
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_memory_released(self, key, safe=False):
try:
ts = self.tasks[key]
if self.validate:
assert not ts.waiting_on
assert not ts.processing_on
if safe:
assert not ts.waiters
if ts.actor:
for ws in ts.who_has:
ws.actors.discard(ts)
if ts.who_wants:
ts.exception_blame = ts
ts.exception = "Worker holding Actor was lost"
return {ts.key: "erred"} # don't try to recreate
recommendations = {}
for dts in ts.waiters:
if dts.state in ("no-worker", "processing"):
recommendations[dts.key] = "waiting"
elif dts.state == "waiting":
dts.waiting_on.add(ts)
# XXX factor this out?
for ws in ts.who_has:
ws.has_what.remove(ts)
ws.nbytes -= ts.get_nbytes()
ts.group.nbytes_in_memory -= ts.get_nbytes()
self.worker_send(
ws.address, {"op": "delete-data", "keys": [key], "report": False}
)
ts.who_has.clear()
ts.state = "released"
self.report({"op": "lost-data", "key": key})
if not ts.run_spec: # pure data
recommendations[key] = "forgotten"
elif ts.has_lost_dependencies:
recommendations[key] = "forgotten"
elif ts.who_wants or ts.waiters:
recommendations[key] = "waiting"
if self.validate:
assert not ts.waiting_on
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_released_erred(self, key):
try:
ts = self.tasks[key]
if self.validate:
with log_errors(pdb=LOG_PDB):
assert ts.exception_blame
assert not ts.who_has
assert not ts.waiting_on
assert not ts.waiters
recommendations = {}
failing_ts = ts.exception_blame
for dts in ts.dependents:
dts.exception_blame = failing_ts
if not dts.who_has:
recommendations[dts.key] = "erred"
self.report(
{
"op": "task-erred",
"key": key,
"exception": failing_ts.exception,
"traceback": failing_ts.traceback,
}
)
ts.state = "erred"
# TODO: waiting data?
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_erred_released(self, key):
try:
ts = self.tasks[key]
if self.validate:
with log_errors(pdb=LOG_PDB):
assert all(dts.state != "erred" for dts in ts.dependencies)
assert ts.exception_blame
assert not ts.who_has
assert not ts.waiting_on
assert not ts.waiters
recommendations = {}
ts.exception = None
ts.exception_blame = None
ts.traceback = None
for dep in ts.dependents:
if dep.state == "erred":
recommendations[dep.key] = "waiting"
self.report({"op": "task-retried", "key": key})
ts.state = "released"
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_waiting_released(self, key):
try:
ts = self.tasks[key]
if self.validate:
assert not ts.who_has
assert not ts.processing_on
recommendations = {}
for dts in ts.dependencies:
s = dts.waiters
if ts in s:
s.discard(ts)
if not s and not dts.who_wants:
recommendations[dts.key] = "released"
ts.waiting_on.clear()
ts.state = "released"
if ts.has_lost_dependencies:
recommendations[key] = "forgotten"
elif not ts.exception_blame and (ts.who_wants or ts.waiters):
recommendations[key] = "waiting"
else:
ts.waiters.clear()
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_processing_released(self, key):
try:
ts = self.tasks[key]
if self.validate:
assert ts.processing_on
assert not ts.who_has
assert not ts.waiting_on
assert self.tasks[key].state == "processing"
self._remove_from_processing(
ts, send_worker_msg={"op": "release-task", "key": key}
)
ts.state = "released"
recommendations = {}
if ts.has_lost_dependencies:
recommendations[key] = "forgotten"
elif ts.waiters or ts.who_wants:
recommendations[key] = "waiting"
if recommendations.get(key) != "waiting":
for dts in ts.dependencies:
if dts.state != "released":
s = dts.waiters
s.discard(ts)
if not s and not dts.who_wants:
recommendations[dts.key] = "released"
ts.waiters.clear()
if self.validate:
assert not ts.processing_on
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_processing_erred(
self, key, cause=None, exception=None, traceback=None, **kwargs
):
try:
ts = self.tasks[key]
if self.validate:
assert cause or ts.exception_blame
assert ts.processing_on
assert not ts.who_has
assert not ts.waiting_on
if ts.actor:
ws = ts.processing_on
ws.actors.remove(ts)
self._remove_from_processing(ts)
if exception is not None:
ts.exception = exception
if traceback is not None:
ts.traceback = traceback
if cause is not None:
failing_ts = self.tasks[cause]
ts.exception_blame = failing_ts
else:
failing_ts = ts.exception_blame
recommendations = {}
for dts in ts.dependents:
dts.exception_blame = failing_ts
recommendations[dts.key] = "erred"
for dts in ts.dependencies:
s = dts.waiters
s.discard(ts)
if not s and not dts.who_wants:
recommendations[dts.key] = "released"
ts.waiters.clear() # do anything with this?
ts.state = "erred"
self.report(
{
"op": "task-erred",
"key": key,
"exception": failing_ts.exception,
"traceback": failing_ts.traceback,
}
)
cs = self.clients["fire-and-forget"]
if ts in cs.wants_what:
self.client_releases_keys(client="fire-and-forget", keys=[key])
if self.validate:
assert not ts.processing_on
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_no_worker_released(self, key):
try:
ts = self.tasks[key]
if self.validate:
assert self.tasks[key].state == "no-worker"
assert not ts.who_has
assert not ts.waiting_on
self.unrunnable.remove(ts)
ts.state = "released"
for dts in ts.dependencies:
dts.waiters.discard(ts)
ts.waiters.clear()
return {}
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def remove_key(self, key):
ts = self.tasks.pop(key)
assert ts.state == "forgotten"
self.unrunnable.discard(ts)
for cs in ts.who_wants:
cs.wants_what.remove(ts)
ts.who_wants.clear()
ts.processing_on = None
ts.exception_blame = ts.exception = ts.traceback = None
if key in self.task_metadata:
del self.task_metadata[key]
def _propagate_forgotten(self, ts, recommendations):
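# Mark *ts* forgotten and detach it from the graph: dependents lose this
# dependency (and are themselves forgotten unless already in memory or
# erred), dependencies that nothing else needs are recommended for
# forgetting, and any stored replicas are deleted from their workers.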
ts.state = "forgotten"
key = ts.key
for dts in ts.dependents:
dts.has_lost_dependencies = True
dts.dependencies.remove(ts)
dts.waiting_on.discard(ts)
if dts.state not in ("memory", "erred"):
# Cannot compute task anymore
recommendations[dts.key] = "forgotten"
ts.dependents.clear()
ts.waiters.clear()
for dts in ts.dependencies:
dts.dependents.remove(ts)
s = dts.waiters
s.discard(ts)
if not dts.dependents and not dts.who_wants:
# Task not needed anymore
assert dts is not ts
recommendations[dts.key] = "forgotten"
ts.dependencies.clear()
ts.waiting_on.clear()
if ts.who_has:
ts.group.nbytes_in_memory -= ts.get_nbytes()
for ws in ts.who_has:
ws.has_what.remove(ts)
ws.nbytes -= ts.get_nbytes()
w = ws.address
if w in self.workers: # in case worker has died
self.worker_send(
w, {"op": "delete-data", "keys": [key], "report": False}
)
ts.who_has.clear()
def transition_memory_forgotten(self, key):
try:
ts = self.tasks[key]
if self.validate:
assert ts.state == "memory"
assert not ts.processing_on
assert not ts.waiting_on
if not ts.run_spec:
# It's ok to forget a pure data task
pass
elif ts.has_lost_dependencies:
# It's ok to forget a task with forgotten dependencies
pass
elif not ts.who_wants and not ts.waiters and not ts.dependents:
# It's ok to forget a task that nobody needs
pass
else:
assert 0, (ts,)
recommendations = {}
if ts.actor:
for ws in ts.who_has:
ws.actors.discard(ts)
self._propagate_forgotten(ts, recommendations)
self.report_on_key(ts=ts)
self.remove_key(key)
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_released_forgotten(self, key):
try:
ts = self.tasks[key]
if self.validate:
assert ts.state in ("released", "erred")
assert not ts.who_has
assert not ts.processing_on
assert not ts.waiting_on, (ts, ts.waiting_on)
if not ts.run_spec:
# It's ok to forget a pure data task
pass
elif ts.has_lost_dependencies:
# It's ok to forget a task with forgotten dependencies
pass
elif not ts.who_wants and not ts.waiters and not ts.dependents:
# It's ok to forget a task that nobody needs
pass
else:
assert 0, (ts,)
recommendations = {}
self._propagate_forgotten(ts, recommendations)
self.report_on_key(ts=ts)
self.remove_key(key)
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition(self, key, finish, *args, **kwargs):
""" Transition a key from its current state to the finish state
Examples
--------
>>> self.transition('x', 'waiting')
{'x': 'processing'}
Returns
-------
Dictionary of recommendations for future transitions
See Also
--------
Scheduler.transitions: transitive version of this function
"""
try:
try:
ts = self.tasks[key]
except KeyError:
return {}
start = ts.state
if start == finish:
return {}
if self.plugins:
dependents = set(ts.dependents)
dependencies = set(ts.dependencies)
if (start, finish) in self._transitions:
func = self._transitions[start, finish]
recommendations = func(key, *args, **kwargs)
elif "released" not in (start, finish):
func = self._transitions["released", finish]
assert not args and not kwargs
a = self.transition(key, "released")
if key in a:
func = self._transitions["released", a[key]]
b = func(key)
a = a.copy()
a.update(b)
recommendations = a
start = "released"
else:
raise RuntimeError(
"Impossible transition from %r to %r" % (start, finish)
)
finish2 = ts.state
self.transition_log.append((key, start, finish2, recommendations, time()))
if self.validate:
logger.debug(
"Transitioned %r %s->%s (actual: %s). Consequence: %s",
key,
start,
finish2,
ts.state,
dict(recommendations),
)
if self.plugins:
# Temporarily put back forgotten key for plugin to retrieve it
if ts.state == "forgotten":
try:
ts.dependents = dependents
ts.dependencies = dependencies
except KeyError:
pass
self.tasks[ts.key] = ts
for plugin in list(self.plugins):
try:
plugin.transition(key, start, finish2, *args, **kwargs)
except Exception:
logger.info("Plugin failed with exception", exc_info=True)
if ts.state == "forgotten":
del self.tasks[ts.key]
if ts.state == "forgotten" and ts.group.name in self.task_groups:
# Remove TaskGroup if all tasks are in the forgotten state
tg = ts.group
if not any(tg.states.get(s) for s in ALL_TASK_STATES):
ts.prefix.groups.remove(tg)
del self.task_groups[tg.name]
return recommendations
except Exception as e:
logger.exception("Error transitioning %r from %r to %r", key, start, finish)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transitions(self, recommendations):
""" Process transitions until none are left
This includes feedback from previous transitions and continues until we
reach a steady state
"""
keys = set()
recommendations = recommendations.copy()
while recommendations:
key, finish = recommendations.popitem()
keys.add(key)
new = self.transition(key, finish)
recommendations.update(new)
if self.validate:
for key in keys:
self.validate_key(key)
def story(self, *keys):
""" Get all transitions that touch one of the input keys """
keys = set(keys)
return [
t for t in self.transition_log if t[0] in keys or keys.intersection(t[3])
]
transition_story = story
def reschedule(self, key=None, worker=None):
""" Reschedule a task
Things may have shifted and this task may now be better suited to run
elsewhere
"""
try:
ts = self.tasks[key]
except KeyError:
logger.warning(
"Attempting to reschedule task {}, which was not "
"found on the scheduler. Aborting reschedule.".format(key)
)
return
if ts.state != "processing":
return
if worker and ts.processing_on.address != worker:
return
self.transitions({key: "released"})
##############################
# Assigning Tasks to Workers #
##############################
def check_idle_saturated(self, ws, occ=None):
""" Update the status of the idle and saturated state
The scheduler keeps track of workers that are:
- Saturated: have enough work to stay busy
- Idle: do not have enough work to stay busy
They are considered saturated if they both have enough tasks to occupy
all of their threads, and if the expected runtime of those tasks is
large enough.
This is useful for load balancing and adaptivity.
"""
if self.total_nthreads == 0 or ws.status == Status.closed:
return
if occ is None:
occ = ws.occupancy
nc = ws.nthreads
p = len(ws.processing)
avg = self.total_occupancy / self.total_nthreads
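# Idle: fewer tasks than threads, or per-thread occupancy below half the
# cluster-wide per-thread average.  Saturated: more tasks than threads AND
# the per-thread share of work beyond the thread count ("pending") exceeds
# both 0.4s and 1.9x the cluster average.  Illustration only: nc=4, p=10,
# occ=8s, avg=1s gives pending = 8 * (10 - 4) / 10 / 4 = 1.2s, which
# clears 0.4s but not 1.9s, so this worker is neither idle nor saturated.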
if p < nc or occ / nc < avg / 2:
self.idle.add(ws)
self.saturated.discard(ws)
else:
self.idle.discard(ws)
pending = occ * (p - nc) / p / nc
if p > nc and pending > 0.4 and pending > 1.9 * avg:
self.saturated.add(ws)
else:
self.saturated.discard(ws)
def valid_workers(self, ts):
""" Return set of currently valid workers for key
If all workers are valid then this returns ``True``.
This check tracks the following state:
* worker_restrictions
* host_restrictions
* resource_restrictions
"""
s = True
if ts.worker_restrictions:
s = {w for w in ts.worker_restrictions if w in self.workers}
if ts.host_restrictions:
# Resolve the alias here rather than early, for the worker
# may not be connected when host_restrictions is populated
hr = [self.coerce_hostname(h) for h in ts.host_restrictions]
# XXX need HostState?
ss = [self.host_info[h]["addresses"] for h in hr if h in self.host_info]
ss = set.union(*ss) if ss else set()
if s is True:
s = ss
else:
s |= ss
if ts.resource_restrictions:
w = {
resource: {
w
for w, supplied in self.resources[resource].items()
if supplied >= required
}
for resource, required in ts.resource_restrictions.items()
}
ww = set.intersection(*w.values())
if s is True:
s = ww
else:
s &= ww
if s is True:
return s
else:
return {self.workers[w] for w in s}
def consume_resources(self, ts, ws):
if ts.resource_restrictions:
for r, required in ts.resource_restrictions.items():
ws.used_resources[r] += required
def release_resources(self, ts, ws):
if ts.resource_restrictions:
for r, required in ts.resource_restrictions.items():
ws.used_resources[r] -= required
#####################
# Utility functions #
#####################
def add_resources(self, comm=None, worker=None, resources=None):
ws = self.workers[worker]
if resources:
ws.resources.update(resources)
ws.used_resources = {}
for resource, quantity in ws.resources.items():
ws.used_resources[resource] = 0
self.resources[resource][worker] = quantity
return "OK"
def remove_resources(self, worker):
ws = self.workers[worker]
for resource, quantity in ws.resources.items():
del self.resources[resource][worker]
def coerce_address(self, addr, resolve=True):
"""
Coerce possible input addresses to canonical form.
*resolve* can be disabled for testing with fake hostnames.
Handles strings, tuples, or aliases.
"""
# XXX how many address-parsing routines do we have?
if addr in self.aliases:
addr = self.aliases[addr]
if isinstance(addr, tuple):
addr = unparse_host_port(*addr)
if not isinstance(addr, str):
raise TypeError("addresses should be strings or tuples, got %r" % (addr,))
if resolve:
addr = resolve_address(addr)
else:
addr = normalize_address(addr)
return addr
def coerce_hostname(self, host):
"""
Coerce the hostname of a worker.
"""
if host in self.aliases:
return self.workers[self.aliases[host]].host
else:
return host
def workers_list(self, workers):
"""
List of qualifying workers
Takes a list of worker addresses or hostnames.
Returns a list of all worker addresses that match
"""
if workers is None:
return list(self.workers)
out = set()
for w in workers:
if ":" in w:
out.add(w)
else:
out.update({ww for ww in self.workers if w in ww}) # TODO: quadratic
return list(out)
def start_ipython(self, comm=None):
"""Start an IPython kernel
Returns Jupyter connection info dictionary.
"""
from ._ipython_utils import start_ipython
if self._ipython_kernel is None:
self._ipython_kernel = start_ipython(
ip=self.ip, ns={"scheduler": self}, log=logger
)
return self._ipython_kernel.get_connection_info()
def worker_objective(self, ts, ws):
"""
Objective function to determine which worker should get the task
Minimize expected start time. Ties are broken by preferring the worker that stores fewer bytes.
"""
comm_bytes = sum(
[dts.get_nbytes() for dts in ts.dependencies if ws not in dts.who_has]
)
stack_time = ws.occupancy / ws.nthreads
start_time = comm_bytes / self.bandwidth + stack_time
if ts.actor:
return (len(ws.actors), start_time, ws.nbytes)
else:
return (start_time, ws.nbytes)
async def get_profile(
self,
comm=None,
workers=None,
scheduler=False,
server=False,
merge_workers=True,
start=None,
stop=None,
key=None,
):
if workers is None:
workers = self.workers
else:
workers = set(self.workers) & set(workers)
if scheduler:
return profile.get_profile(self.io_loop.profile, start=start, stop=stop)
results = await asyncio.gather(
*(
self.rpc(w).profile(start=start, stop=stop, key=key, server=server)
for w in workers
),
return_exceptions=True,
)
results = [r for r in results if not isinstance(r, Exception)]
if merge_workers:
response = profile.merge(*results)
else:
response = dict(zip(workers, results))
return response
async def get_profile_metadata(
self,
comm=None,
workers=None,
merge_workers=True,
start=None,
stop=None,
profile_cycle_interval=None,
):
dt = profile_cycle_interval or dask.config.get(
"distributed.worker.profile.cycle"
)
dt = parse_timedelta(dt, default="ms")
if workers is None:
workers = self.workers
else:
workers = set(self.workers) & set(workers)
results = await asyncio.gather(
*(self.rpc(w).profile_metadata(start=start, stop=stop) for w in workers),
return_exceptions=True,
)
results = [r for r in results if not isinstance(r, Exception)]
counts = [v["counts"] for v in results]
counts = itertools.groupby(merge_sorted(*counts), lambda t: t[0] // dt * dt)
counts = [(time, sum(pluck(1, group))) for time, group in counts]
keys = set()
for v in results:
for t, d in v["keys"]:
for k in d:
keys.add(k)
keys = {k: [] for k in keys}
groups1 = [v["keys"] for v in results]
groups2 = list(merge_sorted(*groups1, key=first))
last = 0
for t, d in groups2:
tt = t // dt * dt
if tt > last:
last = tt
for k, v in keys.items():
v.append([tt, 0])
for k, v in d.items():
keys[k][-1][1] += v
return {"counts": counts, "keys": keys}
async def performance_report(self, comm=None, start=None, code=""):
stop = time()
# Profiles
compute, scheduler, workers = await asyncio.gather(
*[
self.get_profile(start=start),
self.get_profile(scheduler=True, start=start),
self.get_profile(server=True, start=start),
]
)
from . import profile
def profile_to_figure(state):
data = profile.plot_data(state)
figure, source = profile.plot_figure(data, sizing_mode="stretch_both")
return figure
compute, scheduler, workers = map(
profile_to_figure, (compute, scheduler, workers)
)
# Task stream
task_stream = self.get_task_stream(start=start)
total_tasks = len(task_stream)
timespent = defaultdict(int)
for d in task_stream:
for x in d.get("startstops", []):
timespent[x["action"]] += x["stop"] - x["start"]
tasks_timings = ""
for k in sorted(timespent.keys()):
tasks_timings += f"\n<li> {k} time: {format_time(timespent[k])} </li>"
from .diagnostics.task_stream import rectangles
from .dashboard.components.scheduler import task_stream_figure
rects = rectangles(task_stream)
source, task_stream = task_stream_figure(sizing_mode="stretch_both")
source.data.update(rects)
from distributed.dashboard.components.scheduler import (
BandwidthWorkers,
BandwidthTypes,
)
bandwidth_workers = BandwidthWorkers(self, sizing_mode="stretch_both")
bandwidth_workers.update()
bandwidth_types = BandwidthTypes(self, sizing_mode="stretch_both")
bandwidth_types.update()
from bokeh.models import Panel, Tabs, Div
# HTML
html = """
<h1> Dask Performance Report </h1>
<i> Select different tabs on the top for additional information </i>
<h2> Duration: {time} </h2>
<h2> Tasks Information </h2>
<ul>
<li> number of tasks: {ntasks} </li>
{tasks_timings}
</ul>
<h2> Scheduler Information </h2>
<ul>
<li> Address: {address} </li>
<li> Workers: {nworkers} </li>
<li> Threads: {threads} </li>
<li> Memory: {memory} </li>
</ul>
<h2> Calling Code </h2>
<pre>
{code}
</pre>
""".format(
time=format_time(stop - start),
ntasks=total_tasks,
tasks_timings=tasks_timings,
address=self.address,
nworkers=len(self.workers),
threads=sum(w.nthreads for w in self.workers.values()),
memory=format_bytes(sum(w.memory_limit for w in self.workers.values())),
code=code,
)
html = Div(text=html)
html = Panel(child=html, title="Summary")
compute = Panel(child=compute, title="Worker Profile (compute)")
workers = Panel(child=workers, title="Worker Profile (administrative)")
scheduler = Panel(child=scheduler, title="Scheduler Profile (administrative)")
task_stream = Panel(child=task_stream, title="Task Stream")
bandwidth_workers = Panel(
child=bandwidth_workers.fig, title="Bandwidth (Workers)"
)
bandwidth_types = Panel(child=bandwidth_types.fig, title="Bandwidth (Types)")
tabs = Tabs(
tabs=[
html,
task_stream,
compute,
workers,
scheduler,
bandwidth_workers,
bandwidth_types,
]
)
from bokeh.plotting import save, output_file
from bokeh.core.templates import get_env
with tmpfile(extension=".html") as fn:
output_file(filename=fn, title="Dask Performance Report")
template_directory = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "dashboard", "templates"
)
template_environment = get_env()
template_environment.loader.searchpath.append(template_directory)
template = template_environment.get_template("performance_report.html")
save(tabs, filename=fn, template=template)
with open(fn) as f:
data = f.read()
return data
async def get_worker_logs(self, comm=None, n=None, workers=None, nanny=False):
results = await self.broadcast(
msg={"op": "get_logs", "n": n}, workers=workers, nanny=nanny
)
return results
###########
# Cleanup #
###########
def reevaluate_occupancy(self, worker_index=0):
""" Periodically reassess task duration time
The expected duration of a task can change over time. Unfortunately we
don't have a good constant-time way to propagate the effects of these
changes out to the summaries that they affect, like the total expected
runtime of each of the workers, or what tasks are stealable.
In this coroutine we walk through all of the workers and re-align their
estimates with the current state of tasks. We do this periodically
rather than at every transition, and we only do it if the scheduler
process isn't under load (using psutil.Process.cpu_percent()). This
lets us avoid this fringe optimization when we have better things to
think about.
"""
DELAY = 0.1
try:
if self.status == Status.closed:
return
last = time()
next_time = timedelta(seconds=DELAY)
if self.proc.cpu_percent() < 50:
workers = list(self.workers.values())
for i in range(len(workers)):
ws = workers[worker_index % len(workers)]
worker_index += 1
try:
if ws is None or not ws.processing:
continue
self._reevaluate_occupancy_worker(ws)
finally:
del ws # lose ref
duration = time() - last
if duration > 0.005: # 5ms since last release
next_time = timedelta(seconds=duration * 5) # 25ms gap
break
self.loop.add_timeout(
next_time, self.reevaluate_occupancy, worker_index=worker_index
)
except Exception:
logger.error("Error in reevaluate occupancy", exc_info=True)
raise
def _reevaluate_occupancy_worker(self, ws):
""" See reevaluate_occupancy """
old = ws.occupancy
new = 0
nbytes = 0
for ts in ws.processing:
duration = self.get_task_duration(ts)
comm = self.get_comm_cost(ts, ws)
ws.processing[ts] = duration + comm
new += duration + comm
ws.occupancy = new
self.total_occupancy += new - old
self.check_idle_saturated(ws)
# significant increase in duration
if (new > old * 1.3) and ("stealing" in self.extensions):
steal = self.extensions["stealing"]
for ts in ws.processing:
steal.remove_key_from_stealable(ts)
steal.put_key_in_stealable(ts)
async def check_worker_ttl(self):
now = time()
for ws in self.workers.values():
if ws.last_seen < now - self.worker_ttl:
logger.warning(
"Worker failed to heartbeat within %s seconds. Closing: %s",
self.worker_ttl,
ws,
)
await self.remove_worker(address=ws.address)
def check_idle(self):
if any(ws.processing for ws in self.workers.values()) or self.unrunnable:
self.idle_since = None
return
elif not self.idle_since:
self.idle_since = time()
if time() > self.idle_since + self.idle_timeout:
logger.info(
"Scheduler closing after being idle for %s",
format_time(self.idle_timeout),
)
self.loop.add_callback(self.close)
def adaptive_target(self, comm=None, target_duration=None):
""" Desired number of workers based on the current workload
This looks at the current running tasks and memory use, and returns a
number of desired workers. This is often used by adaptive scheduling.
Parameters
----------
target_duration: str
A desired duration of time for computations to take. This affects
how rapidly the scheduler will ask to scale.
See Also
--------
distributed.deploy.Adaptive
"""
if target_duration is None:
target_duration = dask.config.get("distributed.adaptive.target-duration")
target_duration = parse_timedelta(target_duration)
# CPU
cpu = math.ceil(
self.total_occupancy / target_duration
) # TODO: threads per worker
# Avoid a few long tasks from asking for many cores
tasks_processing = 0
for ws in self.workers.values():
tasks_processing += len(ws.processing)
if tasks_processing > cpu:
break
else:
cpu = min(tasks_processing, cpu)
if self.unrunnable and not self.workers:
cpu = max(1, cpu)
# Memory
limit_bytes = {addr: ws.memory_limit for addr, ws in self.workers.items()}
worker_bytes = [ws.nbytes for ws in self.workers.values()]
limit = sum(limit_bytes.values())
total = sum(worker_bytes)
if total > 0.6 * limit:
memory = 2 * len(self.workers)
else:
memory = 0
target = max(memory, cpu)
if target >= len(self.workers):
return target
else: # Scale down?
to_close = self.workers_to_close()
return len(self.workers) - len(to_close)
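# Worked example of the adaptive_target() heuristic above (all numbers hypothetical):
# with total_occupancy = 100 s of queued work and target_duration = 5 s the CPU target is
# ceil(100 / 5) = 20, capped at the number of currently processing tasks (e.g. 12).
# If stored data exceeds 60% of the combined memory limit, the memory target becomes
# 2 * current_worker_count.  The scheduler recommends max(cpu_target, memory_target)
# workers, or falls back to retiring workers_to_close() when that is below the current count.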
def decide_worker(ts, all_workers, valid_workers, objective):
"""
Decide which worker should take task *ts*.
We choose the worker that has the data on which *ts* depends.
    If several workers have dependencies then we choose the least-busy worker.
Optionally provide *valid_workers* of where jobs are allowed to occur
(if all workers are allowed to take the task, pass True instead).
If the task requires data communication because no eligible worker has
all the dependencies already, then we choose to minimize the number
of bytes sent between workers. This is determined by calling the
*objective* function.
"""
deps = ts.dependencies
assert all(dts.who_has for dts in deps)
if ts.actor:
candidates = all_workers
else:
candidates = frequencies([ws for dts in deps for ws in dts.who_has])
if valid_workers is True:
if not candidates:
candidates = all_workers
else:
candidates = valid_workers & set(candidates)
if not candidates:
candidates = valid_workers
if not candidates:
if ts.loose_restrictions:
return decide_worker(ts, all_workers, True, objective)
else:
return None
if not candidates:
return None
if len(candidates) == 1:
return first(candidates)
return min(candidates, key=objective)
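# Hypothetical, self-contained illustration of decide_worker() above; not used by the
# scheduler itself.  SimpleNamespace stands in for TaskState/WorkerState objects and only
# carries the attributes decide_worker actually reads.
def _decide_worker_example():
    from types import SimpleNamespace

    w1 = SimpleNamespace(name="worker-1")
    w2 = SimpleNamespace(name="worker-2")
    dep = SimpleNamespace(who_has={w1})  # only worker-1 already holds the dependency
    ts = SimpleNamespace(dependencies={dep}, actor=False, loose_restrictions=False)
    # With every worker valid, the worker already holding the task's data is chosen
    return decide_worker(ts, all_workers={w1, w2}, valid_workers=True, objective=lambda ws: 0)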
def validate_task_state(ts):
"""
Validate the given TaskState.
"""
assert ts.state in ALL_TASK_STATES or ts.state == "forgotten", ts
if ts.waiting_on:
assert ts.waiting_on.issubset(ts.dependencies), (
"waiting not subset of dependencies",
str(ts.waiting_on),
str(ts.dependencies),
)
if ts.waiters:
assert ts.waiters.issubset(ts.dependents), (
"waiters not subset of dependents",
str(ts.waiters),
str(ts.dependents),
)
for dts in ts.waiting_on:
assert not dts.who_has, ("waiting on in-memory dep", str(ts), str(dts))
assert dts.state != "released", ("waiting on released dep", str(ts), str(dts))
for dts in ts.dependencies:
assert ts in dts.dependents, (
"not in dependency's dependents",
str(ts),
str(dts),
str(dts.dependents),
)
if ts.state in ("waiting", "processing"):
assert dts in ts.waiting_on or dts.who_has, (
"dep missing",
str(ts),
str(dts),
)
assert dts.state != "forgotten"
for dts in ts.waiters:
assert dts.state in ("waiting", "processing"), (
"waiter not in play",
str(ts),
str(dts),
)
for dts in ts.dependents:
assert ts in dts.dependencies, (
"not in dependent's dependencies",
str(ts),
str(dts),
str(dts.dependencies),
)
assert dts.state != "forgotten"
assert (ts.processing_on is not None) == (ts.state == "processing")
assert bool(ts.who_has) == (ts.state == "memory"), (ts, ts.who_has)
if ts.state == "processing":
assert all(dts.who_has for dts in ts.dependencies), (
"task processing without all deps",
str(ts),
str(ts.dependencies),
)
assert not ts.waiting_on
if ts.who_has:
assert ts.waiters or ts.who_wants, (
"unneeded task in memory",
str(ts),
str(ts.who_has),
)
if ts.run_spec: # was computed
assert ts.type
assert isinstance(ts.type, str)
assert not any(ts in dts.waiting_on for dts in ts.dependents)
for ws in ts.who_has:
assert ts in ws.has_what, (
"not in who_has' has_what",
str(ts),
str(ws),
str(ws.has_what),
)
if ts.who_wants:
for cs in ts.who_wants:
assert ts in cs.wants_what, (
"not in who_wants' wants_what",
str(ts),
str(cs),
str(cs.wants_what),
)
if ts.actor:
if ts.state == "memory":
assert sum([ts in ws.actors for ws in ts.who_has]) == 1
if ts.state == "processing":
assert ts in ts.processing_on.actors
def validate_worker_state(ws):
for ts in ws.has_what:
assert ws in ts.who_has, (
"not in has_what' who_has",
str(ws),
str(ts),
str(ts.who_has),
)
for ts in ws.actors:
assert ts.state in ("memory", "processing")
def validate_state(tasks, workers, clients):
"""
Validate a current runtime state
This performs a sequence of checks on the entire graph, running in about
linear time. This raises assert errors if anything doesn't check out.
"""
for ts in tasks.values():
validate_task_state(ts)
for ws in workers.values():
validate_worker_state(ws)
for cs in clients.values():
for ts in cs.wants_what:
assert cs in ts.who_wants, (
"not in wants_what' who_wants",
str(cs),
str(ts),
str(ts.who_wants),
)
_round_robin = [0]
def heartbeat_interval(n):
"""
    Interval in seconds at which we desire heartbeats, based on the number of workers
"""
if n <= 10:
return 0.5
elif n < 50:
return 1
elif n < 200:
return 2
else:
return 5
class KilledWorker(Exception):
def __init__(self, task, last_worker):
super(KilledWorker, self).__init__(task, last_worker)
self.task = task
self.last_worker = last_worker
class WorkerStatusPlugin(SchedulerPlugin):
"""
    A plugin to share worker status with a remote observer
    This is used in cluster managers to keep them updated about the status of
    the scheduler's workers.
"""
def __init__(self, scheduler, comm):
self.bcomm = BatchedSend(interval="5ms")
self.bcomm.start(comm)
self.scheduler = scheduler
self.scheduler.add_plugin(self)
def add_worker(self, worker=None, **kwargs):
ident = self.scheduler.workers[worker].identity()
del ident["metrics"]
del ident["last_seen"]
try:
self.bcomm.send(["add", {"workers": {worker: ident}}])
except CommClosedError:
self.scheduler.remove_plugin(self)
def remove_worker(self, worker=None, **kwargs):
try:
self.bcomm.send(["remove", worker])
except CommClosedError:
self.scheduler.remove_plugin(self)
def teardown(self):
self.bcomm.close()
|
py | 1a52333415133504ede032422216140f27b2b97c | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Margin based AL method.
Samples in batches based on margin scores.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from .sampling_def import SamplingMethod
class MarginAL(SamplingMethod):
def __init__(self, X, y, seed):
self.X = X
self.y = y
self.name = 'margin'
def select_batch_(self, model, already_selected, N, **kwargs):
"""Returns batch of datapoints with smallest margin/highest uncertainty.
For binary classification, can just take the absolute distance to decision
boundary for each point.
    For multiclass classification, must consider the margin between the distances
    for the top two most likely classes.
Args:
model: scikit learn model with decision_function implemented
already_selected: index of datapoints already selected
N: batch size
Returns:
indices of points selected to add using margin active learner
"""
try:
distances = model.decision_function(self.X)
    except:  # model has no decision_function; fall back to class probabilities
distances = model.predict_proba(self.X)
if len(distances.shape) < 2:
min_margin = abs(distances)
else:
sort_distances = np.sort(distances, 1)[:, -2:]
min_margin = sort_distances[:, 1] - sort_distances[:, 0]
rank_ind = np.argsort(min_margin)
rank_ind = [i for i in rank_ind if i not in already_selected]
active_samples = rank_ind[0:N]
return active_samples
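def _example_usage():
    """ Minimal usage sketch on synthetic data (hypothetical, not part of the library):
    fit any scikit-learn classifier on the points labelled so far, then ask MarginAL
    for the N most uncertain unlabelled points to annotate next. """
    from sklearn.linear_model import LogisticRegression

    rng = np.random.RandomState(0)
    X = rng.randn(200, 5)
    y = (X[:, 0] + 0.5 * X[:, 1] > 0).astype(int)
    already_selected = list(range(20))  # indices we pretend are already labelled
    model = LogisticRegression().fit(X[already_selected], y[already_selected])
    sampler = MarginAL(X, y, seed=0)
    return sampler.select_batch_(model, already_selected, N=10)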
|
py | 1a5233b7b02ca209a7f4317df77e7681fcb9e960 | from objects import globals
from base64 import b85decode
def decode_host(hash_host: str) -> str:
"""Decoding host
:param: Get hash host (url)
:type: str
:return: Already decoded host
:rtype: str
"""
decode_hash_host = (b85decode(hash_host).decode("utf-8")
[len(globals.config["services_url_hash_key"].split("::")[0]):]
[:-len(globals.config["services_url_hash_key"].split("::")[1])])
return decode_hash_host |
py | 1a523482327bdbe9e5d15f6927cd507bc6b186d1 | import asyncio
import logging
import os
import sys
from logging.handlers import RotatingFileHandler
import aioredis
from cacheout import Cache
from houdini import PenguinStringCompiler
from houdini.data import db
from houdini.data.permission import PermissionCollection
from houdini.penguin import Penguin
from houdini.spheniscidae import Spheniscidae
try:
import uvloop
uvloop.install()
except ImportError:
uvloop = None
import houdini.handlers
import houdini.plugins
from houdini.handlers import XTListenerManager, XMLListenerManager, DummyEventListenerManager
from houdini.plugins import PluginManager
from houdini.commands import CommandManager
class Houdini:
def __init__(self, config):
self.server = None
self.redis = None
self.cache = None
self.config = config
self.db = db
self.peers_by_ip = {}
self.logger = None
self.client_class = Spheniscidae
self.penguin_string_compiler = None
self.anonymous_penguin_string_compiler = None
self.penguins_by_id = {}
self.penguins_by_username = {}
self.penguins_by_character_id = {}
self.igloos_by_penguin_id = {}
self.open_igloos_by_penguin_id = {}
self.xt_listeners = XTListenerManager(self)
self.xml_listeners = XMLListenerManager(self)
self.dummy_event_listeners = DummyEventListenerManager(self)
self.commands = CommandManager(self)
self.plugins = PluginManager(self)
self.permissions = None
self.chat_filter_words = None
self.items = None
self.igloos = None
self.furniture = None
self.locations = None
self.flooring = None
self.rooms = None
self.stamps = None
self.cards = None
self.postcards = None
self.puffles = None
self.puffle_items = None
self.puffle_food_treasure = None
self.puffle_furniture_treasure = None
self.puffle_clothing_treasure = None
self.characters = None
self.dance_songs = None
self.heartbeat = None
self.egg_timer = None
self.puffle_killer = None
self.music = None
self.dance_floor = None
self.match_making = None
self.water_match_making = None
self.fire_match_making = None
self.puck = (0, 0)
async def start(self):
general_log_file = self.config.logging_general_path if self.config.logging_general_path \
else f'logs/{self.config.name.lower()}.log'
errors_log_file = self.config.logging_error_path if self.config.logging_error_path \
else f'logs/{self.config.name.lower()}-errors.log'
general_log_directory = os.path.dirname(general_log_file)
errors_log_directory = os.path.dirname(errors_log_file)
if not os.path.exists(general_log_directory):
os.mkdir(general_log_directory)
if not os.path.exists(errors_log_directory):
os.mkdir(errors_log_directory)
self.logger = logging.getLogger('houdini')
universal_handler = RotatingFileHandler(general_log_file,
maxBytes=2097152, backupCount=3, encoding='utf-8')
error_handler = logging.FileHandler(errors_log_file)
console_handler = logging.StreamHandler(stream=sys.stdout)
log_formatter = logging.Formatter('%(asctime)s [%(levelname)-5.5s] %(message)s')
error_handler.setLevel(logging.ERROR)
universal_handler.setFormatter(log_formatter)
console_handler.setFormatter(log_formatter)
self.logger.addHandler(universal_handler)
self.logger.addHandler(console_handler)
self.logger.addHandler(error_handler)
level = logging.getLevelName(self.config.logging_level)
self.logger.setLevel(level)
self.server = await asyncio.start_server(
self.client_connected, self.config.address,
self.config.port
)
await self.db.set_bind('postgresql://{}:{}@{}/{}'.format(
self.config.database_username, self.config.database_password,
self.config.database_address,
self.config.database_name))
self.logger.info('Booting Houdini')
self.redis = await aioredis.create_redis_pool('redis://{}:{}'.format(
self.config.redis_address, self.config.redis_port),
minsize=5, maxsize=10)
if self.config.type == 'world':
await self.redis.delete(f'houdini.players.{self.config.id}')
await self.redis.hset(f'houdini.population', self.config.id, 0)
self.cache = Cache(maxsize=None, ttl=self.config.cache_expiry)
self.client_class = Penguin
self.penguin_string_compiler = PenguinStringCompiler()
self.anonymous_penguin_string_compiler = PenguinStringCompiler()
PenguinStringCompiler.setup_default_builder(self.penguin_string_compiler)
PenguinStringCompiler.setup_anonymous_default_builder(self.anonymous_penguin_string_compiler)
await self.xml_listeners.setup(houdini.handlers, exclude_load='houdini.handlers.login.login')
await self.xt_listeners.setup(houdini.handlers)
self.logger.info('World server started')
else:
await self.xml_listeners.setup(houdini.handlers, 'houdini.handlers.login.login')
self.logger.info('Login server started')
await self.dummy_event_listeners.setup(houdini.handlers)
await self.dummy_event_listeners.fire('boot', self)
self.permissions = await PermissionCollection.get_collection()
self.logger.info(f'Multi-client support is '
f'{"enabled" if not self.config.single_client_mode else "disabled"}')
self.logger.info(f'Listening on {self.config.address}:{self.config.port}')
if self.config.auth_key != 'houdini':
self.logger.warning('The static key has been changed from the default, '
'this may cause authentication issues!')
await self.plugins.setup(houdini.plugins)
async with self.server:
await self.server.serve_forever()
async def client_connected(self, reader, writer):
client_object = self.client_class(self, reader, writer)
await client_object.run()
|
py | 1a5235781b14d2c4eccac25b3a2929a7c186bdc4 | from fython.config.trim import trim as _trim
_write_cache = ''
def write(source, *args, end='\n', **kwargs):
global _write_cache
source = _trim(source)
_write_cache += source.format(*args, **kwargs)
_write_cache += end
return _write_cache
def _reset_cache():
global _write_cache
_write_cache = ''
|
py | 1a5235aacd454aaa7a23ede70e4c1c4e57b5e38c | #!/bin/env python
#
# utils.py: utility functions for RnaChipIntegrator
# Copyright (C) University of Manchester 2011-15 Peter Briggs, Leo Zeef
# & Ian Donaldson
#
"""
utils.py
Utility functions for RnaChipIntegrator:
- make_errline: highlight problem fields in a string
- truncate_text: truncate a text string to a specified length
"""
def make_errline(line,bad_fields=[]):
"""Return an 'error line' indicating problem fields in a string
Given a tab-delimited line and a list of integer indices
indicating which fields in the line have problems, this function
returns a tab-delimited string where the original fields are
replaced by either spaces or '^' characters.
When printed beneath the original line, the '^'s indicate which
fields are 'bad' according to the supplied indices, e.g.
Input line: 'good good bad bad good'
Error line: ' ^^^ ^^^ '
Arguments:
line: string where tabs delimit fields
bad_fields: list of integer indices corresponding to 'bad'
values in 'line'
Returns:
Tab-delimited 'error line' to be printed beneath the original
line, to indicate which fields are 'bad'.
"""
# Indicate problem field(s)
errline = []
items = line.rstrip().split('\t')
for i in range(len(items)):
if i in bad_fields:
errline.append("^"*len(items[i]))
else:
errline.append(" "*len(items[i]))
return '\t'.join(errline)
def truncate_text(text,max_len):
"""Truncate a text string
    Given a text string, remove leading characters and replace them with an
    ellipsis ('...') so that the result fits into the maximum number of
    characters (max_len).
"""
len_text = len(text)
if len_text <= max_len:
return text
text = text[len_text-max_len:]
return '...' + text[3:]
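if __name__ == '__main__':
    # Small usage sketch (hypothetical input, not part of the package): flag two problem
    # fields of a tab-delimited line, then shorten a long description to 20 characters.
    line = "chr1\t1000\tlots\t-\tgene1"
    print(line)
    print(make_errline(line, bad_fields=[2, 3]))
    print(truncate_text("a very long feature description indeed", 20))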
|
py | 1a523607808426aa4c22d8e9790bdd32ddb66afc | """ MXNet neural networks for tabular data containing numerical, categorical, and text fields.
First performs neural network specific pre-processing of the data.
Contains separate input modules which are applied to different columns of the data depending on the type of values they contain:
       - Numeric columns are passed through a single Dense layer (binary categorical variables are treated as numeric)
- Categorical columns are passed through separate Embedding layers
- Text columns are passed through separate LanguageModel layers
    Vectors produced by the different input layers are then concatenated and passed to a multi-layer MLP whose output layer is determined by problem_type.
Hyperparameters are passed as dict params, including options for preprocessing stages.
"""
import random, json, time, os, logging, warnings
from collections import OrderedDict
import numpy as np
import pandas as pd
import mxnet as mx
from mxnet import nd, autograd, gluon
from gluoncv.utils import LRSequential, LRScheduler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, QuantileTransformer, FunctionTransformer # PowerTransformer
from autogluon.core import Space
from autogluon.core.utils import try_import_mxboard
from autogluon.core.metrics import log_loss, roc_auc
from autogluon.core.constants import BINARY, MULTICLASS, REGRESSION, SOFTCLASS
from .categorical_encoders import OneHotMergeRaresHandleUnknownEncoder, OrdinalMergeRaresHandleUnknownEncoder
from .embednet import EmbedNet
from .hyperparameters.parameters import get_default_param
from .hyperparameters.searchspaces import get_default_searchspace
from .tabular_nn_dataset import TabularNNDataset
from .tabular_nn_trial import tabular_nn_trial
from ..abstract.abstract_model import AbstractNeuralNetworkModel
from ..utils import fixedvals_from_searchspaces
from ...features.feature_metadata import R_INT, R_FLOAT, R_CATEGORY, R_OBJECT, S_TEXT_NGRAM, S_TEXT_AS_CATEGORY
warnings.filterwarnings("ignore", module='sklearn.preprocessing') # sklearn processing n_quantiles warning
logger = logging.getLogger(__name__)
EPS = 1e-10 # small number
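# Illustrative sketch only -- a hypothetical toy network, not the EmbedNet imported above
# from embednet.py -- showing the input scheme described in the module docstring: all
# numeric columns through one Dense layer, each categorical column through its own
# Embedding, and the pieces concatenated into a shared MLP.  Layer sizes are arbitrary,
# and it relies on the `gluon`/`nd` imports at the top of this module.
class _ToyTabularNet(gluon.nn.Block):
    def __init__(self, cat_cardinalities, embed_dim=8, hidden_units=128, num_outputs=2, **kwargs):
        super().__init__(**kwargs)
        self.numeric_proj = gluon.nn.Dense(32, activation='relu')  # all numeric (vector) features
        self.embeds = gluon.nn.Sequential()
        for cardinality in cat_cardinalities:  # one Embedding per categorical column
            self.embeds.add(gluon.nn.Embedding(cardinality, embed_dim))
        self.mlp = gluon.nn.Sequential()  # shared MLP over the concatenated representations
        self.mlp.add(gluon.nn.Dense(hidden_units, activation='relu'))
        self.mlp.add(gluon.nn.Dense(num_outputs))  # output width depends on problem_type in practice
    def forward(self, numeric_x, categorical_xs):
        parts = [self.numeric_proj(numeric_x)]
        for i, cat_x in enumerate(categorical_xs):
            parts.append(self.embeds[i](cat_x))
        return self.mlp(nd.concat(*parts, dim=1))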
# TODO: Gets stuck after inferring feature types near infinitely in nyc-jiashenliu-515k-hotel-reviews-data-in-europe dataset, 70 GB of memory, c5.9xlarge
# Suspect issue is coming from embeddings due to text features with extremely large categorical counts.
class TabularNeuralNetModel(AbstractNeuralNetworkModel):
""" Class for neural network models that operate on tabular data.
These networks use different types of input layers to process different types of data in various columns.
Attributes:
_types_of_features (dict): keys = 'continuous', 'skewed', 'onehot', 'embed', 'language'; values = column-names of Dataframe corresponding to the features of this type
feature_arraycol_map (OrderedDict): maps feature-name -> list of column-indices in df corresponding to this feature
self.feature_type_map (OrderedDict): maps feature-name -> feature_type string (options: 'vector', 'embed', 'language')
processor (sklearn.ColumnTransformer): scikit-learn preprocessor object.
Note: This model always assumes higher values of self.eval_metric indicate better performance.
"""
# Constants used throughout this class:
# model_internals_file_name = 'model-internals.pkl' # store model internals here
unique_category_str = '!missing!' # string used to represent missing values and unknown categories for categorical features. Should not appear in the dataset
rescale_losses = {gluon.loss.L1Loss:'std', gluon.loss.HuberLoss:'std', gluon.loss.L2Loss:'var'} # dict of loss names where we should rescale loss, value indicates how to rescale. Call self.loss_func.name
params_file_name = 'net.params' # Stores parameters of final network
temp_file_name = 'temp_net.params' # Stores temporary network parameters (eg. during the course of training)
def __init__(self, **kwargs):
super().__init__(**kwargs)
"""
TabularNeuralNetModel object.
Parameters
----------
path (str): file-path to directory where to save files associated with this model
name (str): name used to refer to this model
problem_type (str): what type of prediction problem is this model used for
eval_metric (func): function used to evaluate performance (Note: we assume higher = better)
hyperparameters (dict): various hyperparameters for neural network and the NN-specific data processing
features (list): List of predictive features to use, other features are ignored by the model.
"""
self.feature_arraycol_map = None
self.feature_type_map = None
self.features_to_drop = [] # may change between different bagging folds. TODO: consider just removing these from self.features if it works with bagging
self.processor = None # data processor
self.summary_writer = None
self.ctx = mx.cpu()
self.batch_size = None
self.num_dataloading_workers = None
self.num_dataloading_workers_inference = 0
self.params_post_fit = None
self.num_net_outputs = None
self._architecture_desc = None
self.optimizer = None
self.verbosity = None
if self.stopping_metric is not None and self.eval_metric == roc_auc and self.stopping_metric == log_loss:
self.stopping_metric = roc_auc # NN is overconfident so early stopping with logloss can halt training too quick
self.eval_metric_name = self.stopping_metric.name
def _set_default_params(self):
""" Specifies hyperparameter values to use by default """
default_params = get_default_param(self.problem_type)
for param, val in default_params.items():
self._set_default_param_value(param, val)
def _get_default_auxiliary_params(self) -> dict:
default_auxiliary_params = super()._get_default_auxiliary_params()
extra_auxiliary_params = dict(
ignored_type_group_raw=[R_OBJECT],
ignored_type_group_special=[S_TEXT_NGRAM, S_TEXT_AS_CATEGORY],
)
default_auxiliary_params.update(extra_auxiliary_params)
return default_auxiliary_params
def _get_default_searchspace(self):
return get_default_searchspace(self.problem_type, num_classes=None)
def set_net_defaults(self, train_dataset, params):
""" Sets dataset-adaptive default values to use for our neural network """
if (self.problem_type == MULTICLASS) or (self.problem_type == SOFTCLASS):
self.num_net_outputs = train_dataset.num_classes
elif self.problem_type == REGRESSION:
self.num_net_outputs = 1
if params['y_range'] is None: # Infer default y-range
y_vals = train_dataset.dataset._data[train_dataset.label_index].asnumpy()
min_y = float(min(y_vals))
max_y = float(max(y_vals))
std_y = np.std(y_vals)
y_ext = params['y_range_extend'] * std_y
if min_y >= 0: # infer y must be nonnegative
min_y = max(0, min_y-y_ext)
else:
min_y = min_y-y_ext
if max_y <= 0: # infer y must be non-positive
max_y = min(0, max_y+y_ext)
else:
max_y = max_y+y_ext
params['y_range'] = (min_y, max_y)
elif self.problem_type == BINARY:
self.num_net_outputs = 2
else:
raise ValueError("unknown problem_type specified: %s" % self.problem_type)
if params['layers'] is None: # Use default choices for MLP architecture
if self.problem_type == REGRESSION:
default_layer_sizes = [256, 128] # overall network will have 4 layers. Input layer, 256-unit hidden layer, 128-unit hidden layer, output layer.
else:
default_sizes = [256, 128] # will be scaled adaptively
# base_size = max(1, min(self.num_net_outputs, 20)/2.0) # scale layer width based on number of classes
base_size = max(1, min(self.num_net_outputs, 100) / 50) # TODO: Updated because it improved model quality and made training far faster
default_layer_sizes = [defaultsize*base_size for defaultsize in default_sizes]
layer_expansion_factor = 1 # TODO: consider scaling based on num_rows, eg: layer_expansion_factor = 2-np.exp(-max(0,train_dataset.num_examples-10000))
max_layer_width = params['max_layer_width']
params['layers'] = [int(min(max_layer_width, layer_expansion_factor*defaultsize)) for defaultsize in default_layer_sizes]
if train_dataset.has_vector_features() and params['numeric_embed_dim'] is None: # Use default choices for numeric embedding size
vector_dim = train_dataset.dataset._data[train_dataset.vectordata_index].shape[1] # total dimensionality of vector features
prop_vector_features = train_dataset.num_vector_features() / float(train_dataset.num_features) # Fraction of features that are numeric
min_numeric_embed_dim = 32
max_numeric_embed_dim = params['max_layer_width']
params['numeric_embed_dim'] = int(min(max_numeric_embed_dim, max(min_numeric_embed_dim,
params['layers'][0]*prop_vector_features*np.log10(vector_dim+10) )))
return
def _fit(self, X_train, y_train, X_val=None, y_val=None, time_limit=None, reporter=None, **kwargs):
""" X_train (pd.DataFrame): training data features (not necessarily preprocessed yet)
X_val (pd.DataFrame): test data features (should have same column names as Xtrain)
y_train (pd.Series):
y_val (pd.Series): are pandas Series
kwargs: Can specify amount of compute resources to utilize (num_cpus, num_gpus).
"""
start_time = time.time()
params = self.params.copy()
self.verbosity = kwargs.get('verbosity', 2)
params = fixedvals_from_searchspaces(params)
if self.feature_metadata is None:
raise ValueError("Trainer class must set feature_metadata for this model")
if 'num_cpus' in kwargs:
self.num_dataloading_workers = max(1, int(kwargs['num_cpus']/2.0))
else:
self.num_dataloading_workers = 1
if self.num_dataloading_workers == 1:
self.num_dataloading_workers = 0 # 0 is always faster and uses less memory than 1
self.batch_size = params['batch_size']
train_dataset, val_dataset = self.generate_datasets(X_train=X_train, y_train=y_train, params=params, X_val=X_val, y_val=y_val)
logger.log(15, "Training data for neural network has: %d examples, %d features (%d vector, %d embedding, %d language)" %
(train_dataset.num_examples, train_dataset.num_features,
len(train_dataset.feature_groups['vector']), len(train_dataset.feature_groups['embed']),
len(train_dataset.feature_groups['language']) ))
# self._save_preprocessor() # TODO: should save these things for hyperparam tunning. Need one HP tuner for network-specific HPs, another for preprocessing HPs.
if 'num_gpus' in kwargs and kwargs['num_gpus'] >= 1:
self.ctx = mx.gpu() # Currently cannot use more than 1 GPU
else:
self.ctx = mx.cpu()
self.get_net(train_dataset, params=params)
if time_limit:
time_elapsed = time.time() - start_time
time_limit = time_limit - time_elapsed
self.train_net(train_dataset=train_dataset, params=params, val_dataset=val_dataset, initialize=True, setup_trainer=True, time_limit=time_limit, reporter=reporter)
self.params_post_fit = params
"""
# TODO: if we don't want to save intermediate network parameters, need to do something like saving in temp directory to clean up after training:
with make_temp_directory() as temp_dir:
save_callback = SaveModelCallback(self.model, monitor=self.metric, mode=save_callback_mode, name=self.name)
with progress_disabled_ctx(self.model) as model:
original_path = model.path
model.path = Path(temp_dir)
model.fit_one_cycle(self.epochs, self.lr, callbacks=save_callback)
# Load the best one and export it
model.load(self.name)
print(f'Model validation metrics: {model.validate()}')
model.path = original_path
"""
def get_net(self, train_dataset, params):
""" Creates a Gluon neural net and context for this dataset.
Also sets up trainer/optimizer as necessary.
"""
self.set_net_defaults(train_dataset, params)
self.model = EmbedNet(train_dataset=train_dataset, params=params, num_net_outputs=self.num_net_outputs, ctx=self.ctx)
# TODO: Below should not occur until at time of saving
if not os.path.exists(self.path):
os.makedirs(self.path)
def train_net(self, train_dataset, params, val_dataset=None, initialize=True, setup_trainer=True, time_limit=None, reporter=None):
""" Trains neural net on given train dataset, early stops based on test_dataset.
Args:
train_dataset (TabularNNDataset): training data used to learn network weights
val_dataset (TabularNNDataset): validation data used for hyperparameter tuning
initialize (bool): set = False to continue training of a previously trained model, otherwise initializes network weights randomly
setup_trainer (bool): set = False to reuse the same trainer from a previous training run, otherwise creates new trainer from scratch
"""
start_time = time.time()
logger.log(15, "Training neural network for up to %s epochs..." % params['num_epochs'])
seed_value = params.get('seed_value')
if seed_value is not None: # Set seed
random.seed(seed_value)
np.random.seed(seed_value)
mx.random.seed(seed_value)
if initialize: # Initialize the weights of network
logging.debug("initializing neural network...")
self.model.collect_params().initialize(ctx=self.ctx)
self.model.hybridize()
logging.debug("initialized")
if setup_trainer:
# Also setup mxboard to monitor training if visualizer has been specified:
visualizer = params.get('visualizer', 'none')
if visualizer == 'tensorboard' or visualizer == 'mxboard':
try_import_mxboard()
from mxboard import SummaryWriter
self.summary_writer = SummaryWriter(logdir=self.path, flush_secs=5, verbose=False)
self.optimizer = self.setup_trainer(params=params, train_dataset=train_dataset)
best_val_metric = -np.inf # higher = better
val_metric = None
best_val_epoch = 0
val_improve_epoch = 0 # most recent epoch where validation-score strictly improved
num_epochs = params['num_epochs']
if val_dataset is not None:
y_val = val_dataset.get_labels()
else:
y_val = None
if params['loss_function'] is None:
if self.problem_type == REGRESSION:
params['loss_function'] = gluon.loss.L1Loss()
elif self.problem_type == SOFTCLASS:
params['loss_function'] = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=False, from_logits=self.model.from_logits)
else:
params['loss_function'] = gluon.loss.SoftmaxCrossEntropyLoss(from_logits=self.model.from_logits)
loss_func = params['loss_function']
epochs_wo_improve = params['epochs_wo_improve']
loss_scaling_factor = 1.0 # we divide loss by this quantity to stabilize gradients
loss_torescale = [key for key in self.rescale_losses if isinstance(loss_func, key)]
if loss_torescale:
loss_torescale = loss_torescale[0]
if self.rescale_losses[loss_torescale] == 'std':
loss_scaling_factor = np.std(train_dataset.get_labels())/5.0 + EPS # std-dev of labels
elif self.rescale_losses[loss_torescale] == 'var':
loss_scaling_factor = np.var(train_dataset.get_labels())/5.0 + EPS # variance of labels
else:
raise ValueError("Unknown loss-rescaling type %s specified for loss_func==%s" % (self.rescale_losses[loss_torescale], loss_func))
if self.verbosity <= 1:
verbose_eval = -1 # Print losses every verbose epochs, Never if -1
elif self.verbosity == 2:
verbose_eval = 50
elif self.verbosity == 3:
verbose_eval = 10
else:
verbose_eval = 1
net_filename = self.path + self.temp_file_name
if num_epochs == 0: # use dummy training loop that stops immediately (useful for using NN just for data preprocessing / debugging)
logger.log(20, "Not training Neural Net since num_epochs == 0. Neural network architecture is:")
for batch_idx, data_batch in enumerate(train_dataset.dataloader):
data_batch = train_dataset.format_batch_data(data_batch, self.ctx)
with autograd.record():
output = self.model(data_batch)
labels = data_batch['label']
loss = loss_func(output, labels) / loss_scaling_factor
# print(str(nd.mean(loss).asscalar()), end="\r") # prints per-batch losses
loss.backward()
self.optimizer.step(labels.shape[0])
if batch_idx > 0:
break
self.model.save_parameters(net_filename)
logger.log(15, "untrained Neural Net saved to file")
return
# Training Loop:
for e in range(num_epochs):
if e == 0: # special actions during first epoch:
logger.log(15, "Neural network architecture:")
logger.log(15, str(self.model))
cumulative_loss = 0
for batch_idx, data_batch in enumerate(train_dataset.dataloader):
data_batch = train_dataset.format_batch_data(data_batch, self.ctx)
with autograd.record():
output = self.model(data_batch)
labels = data_batch['label']
loss = loss_func(output, labels) / loss_scaling_factor
# print(str(nd.mean(loss).asscalar()), end="\r") # prints per-batch losses
loss.backward()
self.optimizer.step(labels.shape[0])
cumulative_loss += loss.sum()
train_loss = cumulative_loss/float(train_dataset.num_examples) # training loss this epoch
if val_dataset is not None:
val_metric = self.score(X=val_dataset, y=y_val, eval_metric=self.stopping_metric, metric_needs_y_pred=self.stopping_metric_needs_y_pred)
if (val_dataset is None) or (val_metric >= best_val_metric) or (e == 0): # keep training if score has improved
if val_dataset is not None:
if not np.isnan(val_metric):
if val_metric > best_val_metric:
val_improve_epoch = e
best_val_metric = val_metric
best_val_epoch = e
# Until functionality is added to restart training from a particular epoch, there is no point in saving params without test_dataset
if val_dataset is not None:
self.model.save_parameters(net_filename)
if val_dataset is not None:
if verbose_eval > 0 and e % verbose_eval == 0:
logger.log(15, "Epoch %s. Train loss: %s, Val %s: %s" %
(e, train_loss.asscalar(), self.eval_metric_name, val_metric))
if self.summary_writer is not None:
self.summary_writer.add_scalar(tag='val_'+self.eval_metric_name,
value=val_metric, global_step=e)
else:
if verbose_eval > 0 and e % verbose_eval == 0:
logger.log(15, "Epoch %s. Train loss: %s" % (e, train_loss.asscalar()))
if self.summary_writer is not None:
self.summary_writer.add_scalar(tag='train_loss', value=train_loss.asscalar(), global_step=e) # TODO: do we want to keep mxboard support?
if reporter is not None:
# TODO: Ensure reporter/scheduler properly handle None/nan values after refactor
if val_dataset is not None and (not np.isnan(val_metric)): # TODO: This might work without the if statement
# epoch must be number of epochs done (starting at 1)
reporter(epoch=e+1, validation_performance=val_metric, train_loss=float(train_loss.asscalar())) # Higher val_metric = better
if e - val_improve_epoch > epochs_wo_improve:
break # early-stop if validation-score hasn't strictly improved in `epochs_wo_improve` consecutive epochs
if time_limit:
time_elapsed = time.time() - start_time
time_left = time_limit - time_elapsed
if time_left <= 0:
logger.log(20, "\tRan out of time, stopping training early.")
break
if val_dataset is not None:
self.model.load_parameters(net_filename) # Revert back to best model
try:
os.remove(net_filename)
except FileNotFoundError:
pass
if val_dataset is None:
logger.log(15, "Best model found in epoch %d" % best_val_epoch)
else: # evaluate one final time:
final_val_metric = self.score(X=val_dataset, y=y_val, eval_metric=self.stopping_metric, metric_needs_y_pred=self.stopping_metric_needs_y_pred)
if np.isnan(final_val_metric):
final_val_metric = -np.inf
logger.log(15, "Best model found in epoch %d. Val %s: %s" %
(best_val_epoch, self.eval_metric_name, final_val_metric))
self.params_trained['num_epochs'] = best_val_epoch + 1
return
def _predict_proba(self, X, **kwargs):
""" To align predict with abstract_model API.
Preprocess here only refers to feature processing steps done by all AbstractModel objects,
not tabularNN-specific preprocessing steps.
If X is not DataFrame but instead TabularNNDataset object, we can still produce predictions,
but cannot use preprocess in this case (needs to be already processed).
"""
if isinstance(X, TabularNNDataset):
return self._predict_tabular_data(new_data=X, process=False, predict_proba=True)
elif isinstance(X, pd.DataFrame):
X = self.preprocess(X, **kwargs)
return self._predict_tabular_data(new_data=X, process=True, predict_proba=True)
else:
raise ValueError("X must be of type pd.DataFrame or TabularNNDataset, not type: %s" % type(X))
def _predict_tabular_data(self, new_data, process=True, predict_proba=True): # TODO ensure API lines up with tabular.Model class.
""" Specific TabularNN method to produce predictions on new (unprocessed) data.
Returns 1D numpy array unless predict_proba=True and task is multi-class classification (not binary).
Args:
new_data (pd.Dataframe or TabularNNDataset): new data to make predictions on.
If you want to make prediction for just a single row of new_data, pass in: new_data.iloc[[row_index]]
process (bool): should new data be processed (if False, new_data must be TabularNNDataset)
predict_proba (bool): should we output class-probabilities (not used for regression)
"""
if process:
new_data = self.process_test_data(new_data, batch_size=self.batch_size, num_dataloading_workers=self.num_dataloading_workers_inference, labels=None)
if not isinstance(new_data, TabularNNDataset):
raise ValueError("new_data must of of type TabularNNDataset if process=False")
if self.problem_type == REGRESSION or not predict_proba:
preds = nd.zeros((new_data.num_examples,1))
else:
preds = nd.zeros((new_data.num_examples, self.num_net_outputs))
i = 0
for batch_idx, data_batch in enumerate(new_data.dataloader):
data_batch = new_data.format_batch_data(data_batch, self.ctx)
preds_batch = self.model(data_batch)
batch_size = len(preds_batch)
if self.problem_type != REGRESSION:
if not predict_proba: # need to take argmax
preds_batch = nd.argmax(preds_batch, axis=1, keepdims=True)
else: # need to take softmax
preds_batch = nd.softmax(preds_batch, axis=1)
preds[i:(i+batch_size)] = preds_batch
i = i+batch_size
if self.problem_type == REGRESSION or not predict_proba:
return preds.asnumpy().flatten() # return 1D numpy array
elif self.problem_type == BINARY and predict_proba:
return preds[:,1].asnumpy() # for binary problems, only return P(Y==+1)
return preds.asnumpy() # return 2D numpy array
def generate_datasets(self, X_train, y_train, params, X_val=None, y_val=None):
impute_strategy = params['proc.impute_strategy']
max_category_levels = params['proc.max_category_levels']
skew_threshold = params['proc.skew_threshold']
embed_min_categories = params['proc.embed_min_categories']
use_ngram_features = params['use_ngram_features']
if isinstance(X_train, TabularNNDataset):
train_dataset = X_train
else:
X_train = self.preprocess(X_train)
if self.features is None:
self.features = list(X_train.columns)
train_dataset = self.process_train_data(
df=X_train, labels=y_train, batch_size=self.batch_size, num_dataloading_workers=self.num_dataloading_workers,
impute_strategy=impute_strategy, max_category_levels=max_category_levels, skew_threshold=skew_threshold, embed_min_categories=embed_min_categories, use_ngram_features=use_ngram_features,
)
if X_val is not None:
if isinstance(X_val, TabularNNDataset):
val_dataset = X_val
else:
X_val = self.preprocess(X_val)
val_dataset = self.process_test_data(df=X_val, labels=y_val, batch_size=self.batch_size, num_dataloading_workers=self.num_dataloading_workers_inference)
else:
val_dataset = None
return train_dataset, val_dataset
def process_test_data(self, df, batch_size, num_dataloading_workers, labels=None):
""" Process train or test DataFrame into a form fit for neural network models.
Args:
df (pd.DataFrame): Data to be processed (X)
labels (pd.Series): labels to be processed (y)
test (bool): Is this test data where each datapoint should be processed separately using predetermined preprocessing steps.
                Otherwise preprocessor uses all data to determine properties like best scaling factors, number of categories, etc.
Returns:
Dataset object
"""
warnings.filterwarnings("ignore", module='sklearn.preprocessing') # sklearn processing n_quantiles warning
if labels is not None and len(labels) != len(df):
raise ValueError("Number of examples in Dataframe does not match number of labels")
if (self.processor is None or self._types_of_features is None
or self.feature_arraycol_map is None or self.feature_type_map is None):
raise ValueError("Need to process training data before test data")
if self.features_to_drop:
drop_cols = [col for col in df.columns if col in self.features_to_drop]
if drop_cols:
df = df.drop(columns=drop_cols)
df = self.processor.transform(df) # 2D numpy array. self.feature_arraycol_map, self.feature_type_map have been previously set while processing training data.
return TabularNNDataset(df, self.feature_arraycol_map, self.feature_type_map,
batch_size=batch_size, num_dataloading_workers=num_dataloading_workers,
problem_type=self.problem_type, labels=labels, is_test=True)
def process_train_data(self, df, batch_size, num_dataloading_workers, impute_strategy, max_category_levels, skew_threshold, embed_min_categories, use_ngram_features, labels):
""" Preprocess training data and create self.processor object that can be used to process future data.
This method should only be used once per TabularNeuralNetModel object, otherwise will produce Warning.
# TODO no label processing for now
# TODO: language features are ignored for now
# TODO: add time/ngram features
# TODO: no filtering of data-frame columns based on statistics, e.g. categorical columns with all unique variables or zero-variance features.
This should be done in default_learner class for all models not just TabularNeuralNetModel...
"""
warnings.filterwarnings("ignore", module='sklearn.preprocessing') # sklearn processing n_quantiles warning
if set(df.columns) != set(self.features):
raise ValueError("Column names in provided Dataframe do not match self.features")
if labels is None:
raise ValueError("Attempting process training data without labels")
if len(labels) != len(df):
raise ValueError("Number of examples in Dataframe does not match number of labels")
self._types_of_features, df = self._get_types_of_features(df, skew_threshold=skew_threshold, embed_min_categories=embed_min_categories, use_ngram_features=use_ngram_features) # dict with keys: : 'continuous', 'skewed', 'onehot', 'embed', 'language', values = column-names of df
logger.log(15, "AutoGluon Neural Network infers features are of the following types:")
logger.log(15, json.dumps(self._types_of_features, indent=4))
logger.log(15, "\n")
self.processor = self._create_preprocessor(impute_strategy=impute_strategy, max_category_levels=max_category_levels)
df = self.processor.fit_transform(df) # 2D numpy array
self.feature_arraycol_map = self._get_feature_arraycol_map(max_category_levels=max_category_levels) # OrderedDict of feature-name -> list of column-indices in df corresponding to this feature
num_array_cols = np.sum([len(self.feature_arraycol_map[key]) for key in self.feature_arraycol_map]) # should match number of columns in processed array
if num_array_cols != df.shape[1]:
raise ValueError("Error during one-hot encoding data processing for neural network. Number of columns in df array does not match feature_arraycol_map.")
self.feature_type_map = self._get_feature_type_map() # OrderedDict of feature-name -> feature_type string (options: 'vector', 'embed', 'language')
return TabularNNDataset(df, self.feature_arraycol_map, self.feature_type_map,
batch_size=batch_size, num_dataloading_workers=num_dataloading_workers,
problem_type=self.problem_type, labels=labels, is_test=False)
def setup_trainer(self, params, train_dataset=None):
""" Set up optimizer needed for training.
Network must first be initialized before this.
"""
optimizer_opts = {'learning_rate': params['learning_rate'], 'wd': params['weight_decay'], 'clip_gradient': params['clip_gradient']}
if 'lr_scheduler' in params and params['lr_scheduler'] is not None:
if train_dataset is None:
raise ValueError("train_dataset cannot be None when lr_scheduler is specified.")
base_lr = params.get('base_lr', 1e-6)
target_lr = params.get('target_lr', 1.0)
warmup_epochs = params.get('warmup_epochs', 10)
lr_decay = params.get('lr_decay', 0.1)
lr_mode = params['lr_scheduler']
num_batches = train_dataset.num_examples // params['batch_size']
lr_decay_epoch = [max(warmup_epochs, int(params['num_epochs']/3)), max(warmup_epochs+1, int(params['num_epochs']/2)),
max(warmup_epochs+2, int(2*params['num_epochs']/3))]
lr_scheduler = LRSequential([
LRScheduler('linear', base_lr=base_lr, target_lr=target_lr, nepochs=warmup_epochs, iters_per_epoch=num_batches),
LRScheduler(lr_mode, base_lr=target_lr, target_lr=base_lr, nepochs=params['num_epochs'] - warmup_epochs,
iters_per_epoch=num_batches, step_epoch=lr_decay_epoch, step_factor=lr_decay, power=2)
])
optimizer_opts['lr_scheduler'] = lr_scheduler
if params['optimizer'] == 'sgd':
if 'momentum' in params:
optimizer_opts['momentum'] = params['momentum']
optimizer = gluon.Trainer(self.model.collect_params(), 'sgd', optimizer_opts)
elif params['optimizer'] == 'adam': # TODO: Can we try AdamW?
optimizer = gluon.Trainer(self.model.collect_params(), 'adam', optimizer_opts)
else:
raise ValueError("Unknown optimizer specified: %s" % params['optimizer'])
return optimizer
@staticmethod
def convert_df_dtype_to_str(df):
return df.astype(str)
def _get_feature_arraycol_map(self, max_category_levels):
""" Returns OrderedDict of feature-name -> list of column-indices in processed data array corresponding to this feature """
feature_preserving_transforms = set(['continuous','skewed', 'ordinal', 'language']) # these transforms do not alter dimensionality of feature
feature_arraycol_map = {} # unordered version
current_colindex = 0
for transformer in self.processor.transformers_:
transformer_name = transformer[0]
transformed_features = transformer[2]
if transformer_name in feature_preserving_transforms:
for feature in transformed_features:
if feature in feature_arraycol_map:
raise ValueError("same feature is processed by two different column transformers: %s" % feature)
feature_arraycol_map[feature] = [current_colindex]
current_colindex += 1
elif transformer_name == 'onehot':
oh_encoder = [step for (name, step) in transformer[1].steps if name == 'onehot'][0]
for i in range(len(transformed_features)):
feature = transformed_features[i]
if feature in feature_arraycol_map:
raise ValueError("same feature is processed by two different column transformers: %s" % feature)
oh_dimensionality = min(len(oh_encoder.categories_[i]), max_category_levels+1)
feature_arraycol_map[feature] = list(range(current_colindex, current_colindex+oh_dimensionality))
current_colindex += oh_dimensionality
else:
raise ValueError("unknown transformer encountered: %s" % transformer_name)
return OrderedDict([(key, feature_arraycol_map[key]) for key in feature_arraycol_map])
def _get_feature_type_map(self):
""" Returns OrderedDict of feature-name -> feature_type string (options: 'vector', 'embed', 'language') """
if self.feature_arraycol_map is None:
raise ValueError("must first call _get_feature_arraycol_map() before _get_feature_type_map()")
vector_features = self._types_of_features['continuous'] + self._types_of_features['skewed'] + self._types_of_features['onehot']
feature_type_map = OrderedDict()
for feature_name in self.feature_arraycol_map:
if feature_name in vector_features:
feature_type_map[feature_name] = 'vector'
elif feature_name in self._types_of_features['embed']:
feature_type_map[feature_name] = 'embed'
elif feature_name in self._types_of_features['language']:
feature_type_map[feature_name] = 'language'
else:
raise ValueError("unknown feature type encountered")
return feature_type_map
def _create_preprocessor(self, impute_strategy, max_category_levels):
""" Defines data encoders used to preprocess different data types and creates instance variable which is sklearn ColumnTransformer object """
        if self.processor is not None:
            logger.warning("Attempting to process training data for TabularNeuralNetModel, but previously already did this.")
continuous_features = self._types_of_features['continuous']
skewed_features = self._types_of_features['skewed']
onehot_features = self._types_of_features['onehot']
embed_features = self._types_of_features['embed']
language_features = self._types_of_features['language']
transformers = [] # order of various column transformers in this list is important!
if continuous_features:
continuous_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy=impute_strategy)),
('scaler', StandardScaler())])
transformers.append( ('continuous', continuous_transformer, continuous_features) )
if skewed_features:
power_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy=impute_strategy)),
('quantile', QuantileTransformer(output_distribution='normal')) ]) # Or output_distribution = 'uniform'
transformers.append( ('skewed', power_transformer, skewed_features) )
if onehot_features:
onehot_transformer = Pipeline(steps=[
# TODO: Consider avoiding converting to string for improved memory efficiency
('to_str', FunctionTransformer(self.convert_df_dtype_to_str)),
('imputer', SimpleImputer(strategy='constant', fill_value=self.unique_category_str)),
('onehot', OneHotMergeRaresHandleUnknownEncoder(max_levels=max_category_levels, sparse=False))]) # test-time unknown values will be encoded as all zeros vector
transformers.append( ('onehot', onehot_transformer, onehot_features) )
if embed_features: # Ordinal transformer applied to convert to-be-embedded categorical features to integer levels
ordinal_transformer = Pipeline(steps=[
('to_str', FunctionTransformer(self.convert_df_dtype_to_str)),
('imputer', SimpleImputer(strategy='constant', fill_value=self.unique_category_str)),
('ordinal', OrdinalMergeRaresHandleUnknownEncoder(max_levels=max_category_levels))]) # returns 0-n when max_category_levels = n-1. category n is reserved for unknown test-time categories.
transformers.append( ('ordinal', ordinal_transformer, embed_features) )
if language_features:
raise NotImplementedError("language_features cannot be used at the moment")
return ColumnTransformer(transformers=transformers) # numeric features are processed in the same order as in numeric_features vector, so feature-names remain the same.
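# Illustrative sketch (not part of the original class) of how the ColumnTransformer
# returned above is typically used; `train_df` and `test_df` are assumed dataframes,
# not names defined in this module:
#
#     processor = self._create_preprocessor(impute_strategy='median', max_category_levels=100)
#     train_array = processor.fit_transform(train_df)   # numpy array, columns in transformer order
#     test_array = processor.transform(test_df)         # same column layout at inference time
#
# The resulting column ordering is what _get_feature_arraycol_map() reconstructs above.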
def save(self, path: str = None, verbose=True) -> str:
if self.model is not None:
self._architecture_desc = self.model.architecture_desc
temp_model = self.model
temp_sw = self.summary_writer
self.model = None
self.summary_writer = None
path_final = super().save(path=path, verbose=verbose)
self.model = temp_model
self.summary_writer = temp_sw
self._architecture_desc = None
# Export model
if self.model is not None:
params_filepath = path_final + self.params_file_name
# TODO: Don't use os.makedirs here, have save_parameters function in tabular_nn_model that checks if local path or S3 path
os.makedirs(os.path.dirname(path_final), exist_ok=True)
self.model.save_parameters(params_filepath)
return path_final
@classmethod
def load(cls, path: str, reset_paths=True, verbose=True):
model: TabularNeuralNetModel = super().load(path=path, reset_paths=reset_paths, verbose=verbose)
if model._architecture_desc is not None:
model.model = EmbedNet(architecture_desc=model._architecture_desc, ctx=model.ctx) # recreate network from architecture description
model._architecture_desc = None
# TODO: maybe need to initialize/hybridize?
model.model.load_parameters(model.path + model.params_file_name, ctx=model.ctx)
model.summary_writer = None
return model
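# Illustrative save/load round trip for the two methods above (the path is a
# placeholder and the surrounding trainer code is assumed, so treat this as a sketch):
#
#     path_final = model.save(path='models/TabularNeuralNetModel/')
#     restored = TabularNeuralNetModel.load(path_final)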
def hyperparameter_tune(self, X_train, y_train, X_val, y_val, scheduler_options, **kwargs):
""" Performs HPO and sets self.params to best hyperparameter values """
time_start = time.time()
self.verbosity = kwargs.get('verbosity', 2)
logger.log(15, "Beginning hyperparameter tuning for Neural Network...")
self._set_default_searchspace() # changes non-specified default hyperparams from fixed values to search-spaces.
if self.feature_metadata is None:
raise ValueError("Trainer class must set feature_metadata for this model")
scheduler_func = scheduler_options[0]
scheduler_options = scheduler_options[1]
if scheduler_func is None or scheduler_options is None:
raise ValueError("scheduler_func and scheduler_options cannot be None for hyperparameter tuning")
num_cpus = scheduler_options['resource']['num_cpus']
# num_gpus = scheduler_options['resource']['num_gpus'] # TODO: Currently unused
params_copy = self.params.copy()
self.num_dataloading_workers = max(1, int(num_cpus/2.0))
self.batch_size = params_copy['batch_size']
train_dataset, val_dataset = self.generate_datasets(X_train=X_train, y_train=y_train, params=params_copy, X_val=X_val, y_val=y_val)
train_path = self.path + "train"
val_path = self.path + "validation"
train_dataset.save(file_prefix=train_path)
val_dataset.save(file_prefix=val_path)
if not np.any([isinstance(params_copy[hyperparam], Space) for hyperparam in params_copy]):
logger.warning("Warning: Attempting to do hyperparameter optimization without any search space (all hyperparameters are already fixed values)")
else:
logger.log(15, "Hyperparameter search space for Neural Network: ")
for hyperparam in params_copy:
if isinstance(params_copy[hyperparam], Space):
logger.log(15, str(hyperparam)+ ": "+str(params_copy[hyperparam]))
util_args = dict(
train_path=train_path,
val_path=val_path,
model=self,
time_start=time_start,
time_limit=scheduler_options['time_out']
)
tabular_nn_trial.register_args(util_args=util_args, **params_copy)
scheduler = scheduler_func(tabular_nn_trial, **scheduler_options)
if ('dist_ip_addrs' in scheduler_options) and (len(scheduler_options['dist_ip_addrs']) > 0):
# TODO: Ensure proper working directory setup on remote machines
# This is multi-machine setting, so need to copy dataset to workers:
logger.log(15, "Uploading preprocessed data to remote workers...")
scheduler.upload_files([train_path+TabularNNDataset.DATAOBJ_SUFFIX,
train_path+TabularNNDataset.DATAVALUES_SUFFIX,
val_path+TabularNNDataset.DATAOBJ_SUFFIX,
val_path+TabularNNDataset.DATAVALUES_SUFFIX]) # TODO: currently does not work.
logger.log(15, "uploaded")
scheduler.run()
scheduler.join_jobs()
scheduler.get_training_curves(plot=False, use_legend=False)
return self._get_hpo_results(scheduler=scheduler, scheduler_options=scheduler_options, time_start=time_start)
def get_info(self):
info = super().get_info()
info['hyperparameters_post_fit'] = self.params_post_fit
return info
def reduce_memory_size(self, remove_fit=True, requires_save=True, **kwargs):
super().reduce_memory_size(remove_fit=remove_fit, requires_save=requires_save, **kwargs)
if remove_fit and requires_save:
self.optimizer = None
""" General TODOs:
- Automatically decrease batch-size if memory issue arises
- Retrain final NN on full dataset (train+val). How to ensure stability here?
- OrdinalEncoder class in sklearn currently cannot handle rare categories or unknown ones at test-time, so we have created our own Encoder in category_encoders.py
There is an open PR in sklearn to address this: https://github.com/scikit-learn/scikit-learn/pull/13833/files
Currently, our code uses category_encoders package (BSD license) instead: https://github.com/scikit-learn-contrib/categorical-encoding
Once PR is merged into sklearn, may want to switch: category_encoders.Ordinal -> sklearn.preprocessing.OrdinalEncoder in preprocess_train_data()
- Save preprocessed data so that we can do HPO of neural net hyperparameters more efficiently, while also doing HPO of preprocessing hyperparameters?
Naive full HPO method requires redoing preprocessing in each trial even if we did not change preprocessing hyperparameters.
An alternative is to save each preprocessed dataset & corresponding TabularNeuralNetModel object with its unique param names in the file. Then when we try a new HP-config, we first try loading from file if one exists.
"""
|
py | 1a5236670d419ad20fea1a4408b03357ccb0a377 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='HomePage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('image_1', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe6\xb5\xb7\xe6\x8a\xa5\xe5\x9b\xbe1')),
('url_1', models.CharField(max_length=1000, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe6\xb5\xb7\xe6\x8a\xa5\xe5\x9b\xbe1\xe9\x93\xbe\xe6\x8e\xa5')),
('image_2', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe6\xb5\xb7\xe6\x8a\xa5\xe5\x9b\xbe2')),
('url_2', models.CharField(max_length=1000, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe6\xb5\xb7\xe6\x8a\xa5\xe5\x9b\xbe2\xe9\x93\xbe\xe6\x8e\xa5')),
('image_3', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe6\xb5\xb7\xe6\x8a\xa5\xe5\x9b\xbe3')),
('url_3', models.CharField(max_length=1000, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe6\xb5\xb7\xe6\x8a\xa5\xe5\x9b\xbe3\xe9\x93\xbe\xe6\x8e\xa5')),
('image_4', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe6\xb5\xb7\xe6\x8a\xa5\xe5\x9b\xbe4')),
('url_4', models.CharField(max_length=1000, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe6\xb5\xb7\xe6\x8a\xa5\xe5\x9b\xbe4\xe9\x93\xbe\xe6\x8e\xa5')),
('image_about', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe5\x85\xb3\xe4\xba\x8e\xe6\x88\x91\xe4\xbb\xac\xe5\x9b\xbe\xe7\x89\x87')),
('data_about', models.CharField(max_length=1000, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe5\x85\xb3\xe4\xba\x8e\xe6\x88\x91\xe4\xbb\xac\xe7\xae\x80\xe4\xbb\x8b')),
('url_about', models.CharField(max_length=1000, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe5\x85\xb3\xe4\xba\x8e\xe6\x88\x91\xe4\xbb\xac\xe7\xae\x80\xe4\xbb\x8b\xe9\x93\xbe\xe6\x8e\xa5')),
('image_shengtaizhongzhi_1', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe5\x9b\xbe\xe7\x89\x871')),
('data_shengtaizhongzhi_1', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe7\xae\x80\xe4\xbb\x8b1')),
('url_shengtaizhongzhi_1', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe7\xae\x80\xe4\xbb\x8b1\xe9\x93\xbe\xe6\x8e\xa5')),
('image_shengtaizhongzhi_2', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe5\x9b\xbe\xe7\x89\x872')),
('data_shengtaizhongzhi_2', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe7\xae\x80\xe4\xbb\x8b2')),
('url_shengtaizhongzhi_2', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe7\xae\x80\xe4\xbb\x8b2\xe9\x93\xbe\xe6\x8e\xa5')),
('image_shengtaizhongzhi_3', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe5\x9b\xbe\xe7\x89\x873')),
('data_shengtaizhongzhi_3', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe7\xae\x80\xe4\xbb\x8b3')),
('url_shengtaizhongzhi_3', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe7\xae\x80\xe4\xbb\x8b3\xe9\x93\xbe\xe6\x8e\xa5')),
('image_shengtaizhongzhi_4', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe5\x9b\xbe\xe7\x89\x874')),
('data_shengtaizhongzhi_4', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe7\xae\x80\xe4\xbb\x8b4')),
('url_shengtaizhongzhi_4', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe7\xae\x80\xe4\xbb\x8b4\xe9\x93\xbe\xe6\x8e\xa5')),
('image_shengtaiyangzhi_1', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe5\x9b\xbe\xe7\x89\x871')),
('data_shengtaiyangzhi_1', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe7\xae\x80\xe4\xbb\x8b1')),
('url_shengtaiyangzhi_1', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe7\xae\x80\xe4\xbb\x8b1\xe9\x93\xbe\xe6\x8e\xa5')),
('image_shengtaiyangzhi_2', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe5\x9b\xbe\xe7\x89\x872')),
('data_shengtaiyangzhi_2', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe7\xae\x80\xe4\xbb\x8b2')),
('url_shengtaiyangzhi_2', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe7\xae\x80\xe4\xbb\x8b2\xe9\x93\xbe\xe6\x8e\xa5')),
('image_shengtaiyangzhi_3', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe5\x9b\xbe\xe7\x89\x873')),
('data_shengtaiyangzhi_3', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe7\xae\x80\xe4\xbb\x8b3')),
('url_shengtaiyangzhi_3', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe7\xae\x80\xe4\xbb\x8b3\xe9\x93\xbe\xe6\x8e\xa5')),
('image_shengtaiyangzhi_4', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe5\x9b\xbe\xe7\x89\x874')),
('data_shengtaiyangzhi_4', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe7\xae\x80\xe4\xbb\x8b4')),
('url_shengtaiyangzhi_4', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe7\xae\x80\xe4\xbb\x8b4\xe9\x93\xbe\xe6\x8e\xa5')),
('image_kuangchanziyuan_1', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe5\x9b\xbe\xe7\x89\x871')),
('data_kuangchanziyuan_1', models.CharField(max_length=1000, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe7\xae\x80\xe4\xbb\x8b1')),
('url_kuangchanziyuan_1', models.CharField(max_length=1000, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe7\xae\x80\xe4\xbb\x8b1\xe9\x93\xbe\xe6\x8e\xa5')),
('image_kuangchanziyuan_2', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe5\x9b\xbe\xe7\x89\x872')),
('data_kuangchanziyuan_2', models.CharField(max_length=1000, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe7\xae\x80\xe4\xbb\x8b2')),
('url_kuangchanziyuan_2', models.CharField(max_length=1000, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe7\xae\x80\xe4\xbb\x8b2\xe9\x93\xbe\xe6\x8e\xa5')),
('image_kuangchanziyuan_3', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe5\x9b\xbe\xe7\x89\x873')),
('data_kuangchanziyuan_3', models.CharField(max_length=1000, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe7\xae\x80\xe4\xbb\x8b3')),
('url_kuangchanziyuan_3', models.CharField(max_length=1000, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe7\xae\x80\xe4\xbb\x8b3\xe9\x93\xbe\xe6\x8e\xa5')),
('image_kuangchanziyuan_4', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe5\x9b\xbe\xe7\x89\x874')),
('data_kuangchanziyuan_4', models.CharField(max_length=1000, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe7\xae\x80\xe4\xbb\x8b4')),
('url_kuangchanziyuan_4', models.CharField(max_length=1000, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe7\xae\x80\xe4\xbb\x8b4\xe9\x93\xbe\xe6\x8e\xa5')),
],
options={
'verbose_name': '\u9996\u9875\u8bbe\u7f6e',
'verbose_name_plural': '\u9996\u9875\u8bbe\u7f6e',
},
),
]
|
py | 1a5236784af1f89059e8ae92053c4ce60b68253e | # -*- coding: utf-8 -*-
from django.test import TestCase
class CacheHelperHelpersTest(TestCase):
def test_ns(self):
"""
Tests that: * "app:module:method" == "app:module:method[anonymous]"
* "app:module:method", l={} == "app:module:method[anonymous](l={})"
* "app:module:method", a=2, b="plop" == "app:module:method[anonymous](a=2|b=plop)"
"""
from _commons.helpers.cache import CacheHelper
self.assertEqual(CacheHelper.ns('app:module:method', hash=False), 'app:module:method[anonymous]')
self.assertEqual(CacheHelper.ns('app:module:method', l={}, hash=False), 'app:module:method[anonymous](l={})')
self.assertEqual(CacheHelper.ns('app:module:method', a=2, b='plop', hash=False), 'app:module:method[anonymous](a=2|b=plop)')
class DurationsHelpersTest(TestCase):
def test_humanize(self):
"""
Tests that: * 60s == "1 minute"
* 600s == "10 minutes"
* 7200 == "2 hours"
* 86400 == "1 day"
"""
from _commons.helpers.durations import humanize
self.assertEqual(humanize(60), '1 minute')
self.assertEqual(humanize(600), '10 minutes')
self.assertEqual(humanize(7200), '2 hours')
self.assertEqual(humanize(86400), '1 day')
class LevelsHelperHelpersTest(TestCase):
def test_encode(self):
"""
Tests that: * "dummy" == 0
* "intermediate" == 2
"""
from _commons.helpers.levels import LevelsHelper
self.assertEqual(LevelsHelper.encode('dummy'), 0)
self.assertEqual(LevelsHelper.encode('intermediate'), 2)
def test_reverse(self):
"""
Tests that: * 1 == "novice"
* 3 == "advanced"
"""
from _commons.helpers.levels import LevelsHelper
self.assertEqual(LevelsHelper.reverse(1)[0], 'novice')
self.assertEqual(LevelsHelper.reverse(3)[0], 'advanced')
class NumbersHelpersTest(TestCase):
def test_percentage_of(self):
"""
Tests that: * 30,0 == 100
* 0,1 == 0
* 10,50 == 16
* 25,100 == 20
* 10,10 == 50
"""
from _commons.helpers.numbers import percentage_of
self.assertEqual(percentage_of(30,0), 100)
self.assertEqual(percentage_of(0,1), 0)
self.assertEqual(percentage_of(10,50), 16)
self.assertEqual(percentage_of(25,100), 20)
self.assertEqual(percentage_of(10,10), 50)
class RedirectsHelpersTest(TestCase):
def test_login_required_url(self):
"""
Tests that: * "/dashboard/tutorial/new/" == "/account/login/?next=%2Fdashboard%2Ftutorial%2Fnew%2F&required"
"""
from _commons.helpers.redirects import login_required_url
self.assertEqual(\
login_required_url('/dashboard/tutorial/new/'),\
'/account/login/?next=%2Fdashboard%2Ftutorial%2Fnew%2F&required'\
)
def test_register_required_url(self):
"""
Tests that: * None == "/account/register/"
"""
from _commons.helpers.redirects import register_required_url
self.assertEqual(register_required_url(), '/account/register/')
class StatusesHelperHelpersTest(TestCase):
def test_encode(self):
"""
Tests that: * "moderated" == 1
* "accepted" == 2
"""
from _commons.helpers.statuses import StatusesHelper
self.assertEqual(StatusesHelper.encode('moderated'), 1)
self.assertEqual(StatusesHelper.encode('accepted'), 2)
def test_reverse(self):
"""
Tests that: * 1 == "moderated"
* 2 == "accepted"
"""
from _commons.helpers.statuses import StatusesHelper
self.assertEqual(StatusesHelper.reverse(1)[0], 'moderated')
self.assertEqual(StatusesHelper.reverse(2)[0], 'accepted')
class StringsHelpersTest(TestCase):
def test_strip_accents(self):
"""
Tests that: * "valérian" == "valerian"
* "ALLÔ" == "ALLO"
"""
from _commons.helpers.strings import StringsHelper
self.assertEqual(StringsHelper.strip_accents(u'valérian'), 'valerian')
self.assertEqual(StringsHelper.strip_accents(u'ALLÔ'), 'ALLO')
def test_downcode(self):
"""
Tests that: * u"Καλημέρα Joe!" == "Kalhmera Joe!"
* "Test Normal" == "Test Normal"
"""
from _commons.helpers.strings import StringsHelper
self.assertEqual(StringsHelper.downcode(u'Καλημέρα Joe!'), 'Kalhmera Joe!')
self.assertEqual(StringsHelper.downcode('Test Normal'), 'Test Normal')
class TypesHelperHelpersTest(TestCase):
def test_encode(self):
"""
Tests that: * "comment" == 0
* "tutorial" == 1
"""
from _commons.helpers.types import TypesHelper
self.assertEqual(TypesHelper.encode('comment'), 0)
self.assertEqual(TypesHelper.encode('tutorial'), 1)
def test_reverse(self):
"""
Tests that: * 0 == "comment"
* 1 == "tutorial"
"""
from _commons.helpers.types import TypesHelper
self.assertEqual(TypesHelper.reverse(0), 'comment')
self.assertEqual(TypesHelper.reverse(1), 'tutorial')
|
py | 1a523708135b6d970e05c222f76d5028030eb0bc | import pandas as pd
chrom_sizes = pd.Series(
{1: 249250621,
10: 135534747,
11: 135006516,
12: 133851895,
13: 115169878,
14: 107349540,
15: 102531392,
16: 90354753,
17: 81195210,
18: 78077248,
19: 59128983,
2: 243199373,
20: 63025520,
21: 48129895,
22: 51304566,
3: 198022430,
4: 191154276,
5: 180915260,
6: 171115067,
7: 159138663,
8: 146364022,
9: 141213431,
}
)
chrom_sizes_norm = chrom_sizes / chrom_sizes.max()
def _make_tableau20():
# tableau20 from # http://www.randalolson.com/2014/06/28/how-to-make-beautiful-data-visualizations-in-python-with-matplotlib/
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib
# accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
return tableau20
tableau20 = _make_tableau20()
def generate_null_snvs(df, snvs, num_null_sets=5):
"""
Generate a set of null SNVs based on an input list of SNVs and categorical
annotations.
Parameters
----------
df : pandas.DataFrame
Pandas dataframe where each column is a categorization of SNPs.
The index should be SNPs of the form chrom:pos.
snvs : list
List of input SNVs in the format chrom:pos. Entries that aren't in
the index of df will be dropped.
num_null_sets : int
Number of sets of null SNVs to generate.
Returns
-------
null_sets : pandas.Dataframe
Pandas dataframe with input SNVs as first column and null SNVs as
following columns.
"""
import numpy as np
import random
random.seed(20151007)
input_snvs = list(set(df.index) & set(snvs))
sig = df.ix[input_snvs]
not_sig = df.ix[set(df.index) - set(snvs)]
sig['group'] = sig.apply(lambda x: '::'.join(x), axis=1)
not_sig['group'] = not_sig.apply(lambda x: '::'.join(x), axis=1)
null_sets = []
vc = sig.group.value_counts()
bins = {c:sorted(list(df[c].value_counts().index)) for c in df.columns}
ordered_inputs = []
for i in vc.index:
ordered_inputs += list(sig[sig.group == i].index)
tdf = not_sig[not_sig.group == i]
count = vc[i]
for n in range(num_null_sets):
if tdf.shape[0] == 0:
groups = [i]
while tdf.shape[0] == 0:
# If there are no potential null SNVs in this group, we'll
# expand the group randomly.
g = groups[-1]
# Choose random bin.
cols = list(not_sig.columns)
cols.remove('group')
b = random.choice(cols)
# Get possibilities for that bin.
t = bins[b]
# Get last set of bin values and the value for the bin we
# want to change.
d = dict(list(zip(not_sig.columns, g.split('::'))))
cat = d[b]
# Randomly walk away from bin value.
ind = t.index(cat)
if ind == 0:
ind += 1
elif ind == len(t) - 1:
ind -= 1
else:
ind += random.choice([-1, 1])
d[b] = t[ind]
groups.append('::'.join(pd.Series(d)[not_sig.columns].astype(str)))
tdf = not_sig[not_sig.group.apply(lambda x: x in groups)]
if count <= tdf.shape[0]:
ind = random.sample(tdf.index, count)
else:
ind = list(np.random.choice(tdf.index, size=count, replace=True))
if i == vc.index[0]:
null_sets.append(ind)
else:
null_sets[n] += ind
null_sets = pd.DataFrame(null_sets).T
null_sets.columns = ['null_{}'.format(x) for x in null_sets.columns]
cs = list(null_sets.columns)
null_sets['input'] = ordered_inputs
null_sets = null_sets[['input'] + cs]
return null_sets
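# Illustrative usage sketch for generate_null_snvs; the annotation table and SNV IDs
# below are made-up examples, not data referenced elsewhere in this module:
#
#     annot = pd.DataFrame({'maf_bin': ['low', 'low', 'high', 'high'],
#                           'tss_dist_bin': ['near', 'far', 'near', 'far']},
#                          index=['chr1:100', 'chr1:200', 'chr2:300', 'chr2:400'])
#     null_sets = generate_null_snvs(annot, ['chr1:100', 'chr2:300'], num_null_sets=10)
#     # null_sets has an 'input' column plus null_0 ... null_9 columns of matched SNVs.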
def make_grasp_phenotype_file(fn, pheno, out):
"""
Subset the GRASP database on a specific phenotype.
Parameters
----------
fn : str
Path to GRASP database file.
pheno : str
Phenotype to extract from database.
out : sttr
Path to output file for subset of GRASP database.
"""
import subprocess
c = 'awk -F "\\t" \'NR == 1 || $12 == "{}" \' {} > {}'.format(
pheno.replace("'", '\\x27'), fn, out)
subprocess.check_call(c, shell=True)
def parse_grasp_gwas(fn):
"""
Read GRASP database and filter for unique hits.
Parameters
----------
fn : str
Path to (subset of) GRASP database.
Returns
-------
df : pandas.DataFrame
Pandas dataframe with de-duplicated, significant SNPs. The index is of
the form chrom:pos where pos is the one-based position of the SNP. The
columns are chrom, start, end, rsid, and pvalue. rsid may be empty or
not actually an RSID. chrom, start, end make a zero-based bed file with
the SNP coordinates.
"""
df = pd.read_table(fn, low_memory=False)
df = df[df.Pvalue < 1e-5]
df = df.sort(columns=['chr(hg19)', 'pos(hg19)', 'Pvalue'])
df = df.drop_duplicates(subset=['chr(hg19)', 'pos(hg19)'])
df = df[df.Pvalue < 1e-5]
df['chrom'] = 'chr' + df['chr(hg19)'].astype(str)
df['end'] = df['pos(hg19)']
df['start'] = df.end - 1
df['rsid'] = df['SNPid(in paper)']
df['pvalue'] = df['Pvalue']
df = df[['chrom', 'start', 'end', 'rsid', 'pvalue']]
df.index = df['chrom'].astype(str) + ':' + df['end'].astype(str)
return df
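# Illustrative sketch chaining the two GRASP helpers above; the file paths and the
# phenotype string are placeholders, not real files used by this module:
#
#     make_grasp_phenotype_file('GRASP_full.tsv', 'Type 2 diabetes', 'grasp_t2d.tsv')
#     t2d = parse_grasp_gwas('grasp_t2d.tsv')
#     t2d.head()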
def parse_roadmap_gwas(fn):
"""
Read Roadmap GWAS file and filter for unique, significant (p < 1e-5)
SNPs.
Parameters
----------
fn : str
Path to (subset of) GRASP database.
Returns
-------
df : pandas.DataFrame
Pandas dataframe with de-duplicated, significant SNPs. The index is of
the form chrom:pos where pos is the one-based position of the SNP. The
columns are chrom, start, end, rsid, and pvalue. rsid may be empty or
not actually an RSID. chrom, start, end make a zero-based bed file with
the SNP coordinates.
"""
df = pd.read_table(fn, low_memory=False,
names=['chrom', 'start', 'end', 'rsid', 'pvalue'])
df = df[df.pvalue < 1e-5]
df = df.sort(columns=['chrom', 'start', 'pvalue'])
df = df.drop_duplicates(subset=['chrom', 'start'])
df = df[df['chrom'] != 'chrY']
df.index = df['chrom'].astype(str) + ':' + df['end'].astype(str)
return df
def ld_prune(df, ld_beds, snvs=None):
"""
Prune set of GWAS based on LD and significance. A graph of all SNVs is
constructed with edges for LD >= 0.8 and the most significant SNV per
connected component is kept.
Parameters
----------
df : pandas.DataFrame
Pandas dataframe with unique SNVs. The index is of the form chrom:pos
where pos is the one-based position of the SNV. The columns must include
chrom, start, end, and pvalue. chrom, start, end make a zero-based bed
file with the SNV coordinates.
ld_beds : dict
Dict whose keys are chromosomes and whose values are filenames of
tabixed LD bed files. An LD bed file looks like "chr1 11007 11008
11008:11012:1" where the first three columns are the zero-based
half-open coordinate of the SNV and the fourth column has the one-based
coordinate of the SNV followed by the one-based coordinate of a
different SNV and the LD between them. In this example, the variants are
in perfect LD. The bed file should also contain the reciprocal line for
this LD relationship: "chr1 11011 11012 11012:11008:1".
snvs : list
List of SNVs to filter against. If a SNV is not in this list, it will
not be included. If you are working with GWAS SNPs, this is useful for
filtering out SNVs that aren't in the SNPsnap database for instance.
Returns
-------
out : pandas.DataFrame
Pandas dataframe in the same format as the input dataframe but with only
independent SNVs.
"""
import networkx as nx
import tabix
if snvs:
df = df.ix[set(df.index) & set(snvs)]
keep = set()
for chrom in list(ld_beds.keys()):
tdf = df[df['chrom'].astype(str) == chrom]
if tdf.shape[0] > 0:
f = tabix.open(ld_beds[chrom])
# Make a dict where each key is a SNP and the values are all of the
# other SNPs in LD with the key.
ld_d = {}
for j in tdf.index:
p = tdf.ix[j, 'end']
ld_d[p] = []
try:
r = f.query(chrom, p - 1, p)
while True:
try:
n = next(r)
p1, p2, r2 = n[-1].split(':')
if float(r2) >= 0.8:
ld_d[p].append(int(p2))
except StopIteration:
break
except tabix.TabixError:
continue
# Make adjacency matrix for LD.
cols = sorted(list(set(
[item for sublist in list(ld_d.values()) for item in sublist])))
t = pd.DataFrame(0, index=list(ld_d.keys()), columns=cols)
for k in list(ld_d.keys()):
t.ix[k, ld_d[k]] = 1
t.index = ['{}:{}'.format(chrom, x) for x in t.index]
t.columns = ['{}:{}'.format(chrom, x) for x in t.columns]
# Keep all SNPs not in LD with any others. These will be in the index
# but not in the columns.
keep |= set(t.index) - set(t.columns)
# Filter so we only have SNPs that are in LD with at least one other
# SNP.
ind = list(set(t.columns) & set(t.index))
# Keep one most sig. SNP per connected subgraph.
t = t.ix[ind, ind]
g = nx.Graph(t.values)
c = nx.connected_components(g)
while True:
try:
sg = next(c)
s = tdf.ix[t.index[list(sg)]]
keep.add(s[s.pvalue == s.pvalue.min()].index[0])
except StopIteration:
break
out = df.ix[keep]
return out
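# Illustrative sketch for ld_prune; the paths and GWAS dataframe are assumptions, and
# the LD bed files must be bgzipped and tabix-indexed as described in the docstring:
#
#     ld_beds = {'chr{}'.format(c): '/path/to/ld/chr{}.bed.gz'.format(c)
#                for c in range(1, 23)}
#     gwas = parse_roadmap_gwas('roadmap_gwas_subset.bed')
#     independent = ld_prune(gwas, ld_beds)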
def ld_expand(df, ld_beds):
"""
Expand a set of SNVs into all SNVs with LD >= 0.8 and return a BedTool of
the expanded SNPs.
Parameters
----------
df : pandas.DataFrame
Pandas dataframe with SNVs. The index is of the form chrom:pos where pos
is the one-based position of the SNV. The columns are chrom, start, end.
chrom, start, end make a zero-based bed file with the SNV coordinates.
ld_beds : dict
Dict whose keys are chromosomes and whose values are filenames of
tabixed LD bed files. The LD bed files should be formatted like this:
chr1 14463 14464 14464:51479:0.254183
where the the first three columns indicate the zero-based coordinates of
a SNV and the the fourth column has the one-based coordinate of that
SNV, the one-based coordinate of another SNV on the same chromosome, and
the LD between these SNVs (all separated by colons).
Returns
-------
bt : pybedtools.BedTool
BedTool with input SNVs and the SNVs they are in LD with.
"""
import pybedtools as pbt
import tabix
out_snps = []
for chrom in list(ld_beds.keys()):
t = tabix.open(ld_beds[chrom])
tdf = df[df['chrom'].astype(str) == chrom]
for ind in tdf.index:
p = tdf.ix[ind, 'end']
out_snps.append('{}\t{}\t{}\t{}\n'.format(chrom, p - 1, p, ind))
try:
r = t.query('{}'.format(chrom), p - 1, p)
while True:
try:
n = next(r)
p1, p2, r2 = n[-1].split(':')
if float(r2) >= 0.8:
out_snps.append('{}\t{}\t{}\t{}\n'.format(
n[0], int(p2) - 1, int(p2), ind))
except StopIteration:
break
except tabix.TabixError:
continue
bt = pbt.BedTool(''.join(out_snps), from_string=True)
bt = bt.sort()
return bt
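# Illustrative sketch for ld_expand, continuing the hypothetical ld_prune example above:
#
#     expanded_bt = ld_expand(independent, ld_beds)
#     expanded_bt.saveas('independent_plus_ld_partners.bed')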
def liftover_bed(
bed,
chain,
mapped=None,
unmapped=None,
liftOver_path='liftOver',
):
"""
Lift over a bed file using a given chain file.
Parameters
----------
bed : str or pybedtools.BedTool
Coordinates to lift over.
chain : str
Path to chain file to use for lift over.
mapped : str
Path for bed file with coordinates that are lifted over correctly.
unmapped : str
Path for text file to store coordinates that did not lift over
correctly. If this is not provided, these are discarded.
liftOver_path : str
Path to liftOver executable if not in path.
Returns
-------
new_coords : pandas.DataFrame
Pandas data frame with lift over results. Index is old coordinates in
the form chrom:start-end and columns are chrom, start, end and loc
(chrom:start-end) in new coordinate system.
"""
import subprocess
import sys
import pybedtools as pbt
if mapped == None:
import tempfile
mapped = tempfile.NamedTemporaryFile()
mname = mapped.name
else:
mname = mapped
if unmapped == None:
import tempfile
unmapped = tempfile.NamedTemporaryFile()
uname = unmapped.name
else:
uname = unmapped
if type(bed) == str:
bt = pbt.BedTool(bed)
elif type(bed) == pbt.bedtool.BedTool:
bt = bed
else:
sys.exit(1)
bt = bt.sort()
c = '{} {} {} {} {}'.format(liftOver_path, bt.fn, chain, mname, uname)
subprocess.check_call(c, shell=True)
with open(uname) as f:
missing = pbt.BedTool(''.join([x for x in f.readlines()[1::2]]),
from_string=True)
bt = bt.subtract(missing)
bt_mapped = pbt.BedTool(mname)
old_loc = []
for r in bt:
old_loc.append('{}:{}-{}'.format(r.chrom, r.start, r.end))
new_loc = []
new_chrom = []
new_start = []
new_end = []
for r in bt_mapped:
new_loc.append('{}:{}-{}'.format(r.chrom, r.start, r.end))
new_chrom.append(r.chrom)
new_start.append(r.start)
new_end.append(r.end)
new_coords = pd.DataFrame({'loc':new_loc, 'chrom': new_chrom,
'start': new_start, 'end': new_end},
index=old_loc)
for f in [mapped, unmapped]:
try:
f.close()
except AttributeError:
continue
return new_coords
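# Illustrative sketch for liftover_bed; the chain file and bed file paths are
# placeholders, and liftOver must be available on the PATH (or given via liftOver_path):
#
#     new_coords = liftover_bed('snvs_hg19.bed', 'hg19ToHg38.over.chain.gz',
#                               mapped='snvs_hg38.bed', unmapped='snvs_unmapped.txt')
#     new_coords.head()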
def deseq2_size_factors(counts, meta, design):
"""
Get size factors for counts using DESeq2.
Parameters
----------
counts : pandas.DataFrame
Counts to pass to DESeq2.
meta : pandas.DataFrame
Pandas dataframe whose index matches the columns of counts. This is
passed to DESeq2's colData.
design : str
Design like ~subject_id that will be passed to DESeq2. The design
variables should match columns in meta.
Returns
-------
sf : pandas.Series
Series whose index matches the columns of counts and whose values are
the size factors from DESeq2. Divide each column by its size factor to
obtain normalized counts.
"""
import rpy2.robjects as r
from rpy2.robjects import pandas2ri
pandas2ri.activate()
r.r('suppressMessages(library(DESeq2))')
r.globalenv['counts'] = counts
r.globalenv['meta'] = meta
r.r('dds = DESeqDataSetFromMatrix(countData=counts, colData=meta, '
'design={})'.format(design))
r.r('dds = estimateSizeFactors(dds)')
r.r('sf = sizeFactors(dds)')
sf = r.globalenv['sf']
return pd.Series(sf, index=counts.columns)
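# Illustrative sketch for deseq2_size_factors (requires rpy2 and DESeq2 installed);
# `counts` and `meta` are assumed objects shaped as described in the docstring:
#
#     sf = deseq2_size_factors(counts, meta, design='~subject_id')
#     norm_counts = counts / sf   # divide each column by its size factor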
def goseq_gene_enrichment(genes, sig, plot_fn=None, length_correct=True):
"""
Perform goseq enrichment for an Ensembl gene set.
Parameters
----------
genes : list
List of all genes as Ensembl IDs.
sig : list
List of boolean values indicating whether each gene is significant or
not.
plot_fn : str
Path to save length bias plot to. If not provided, the plot is deleted.
length_correct : bool
Correct for length bias.
Returns
-------
go_results : pandas.DataFrame
Dataframe with goseq results as well as Benjamini-Hochberg correct
p-values.
"""
import os
import readline
import statsmodels.stats.multitest as smm
import rpy2.robjects as r
genes = list(genes)
sig = [bool(x) for x in sig]
r.r('suppressMessages(library(goseq))')
r.globalenv['genes'] = list(genes)
r.globalenv['group'] = list(sig)
r.r('group = as.logical(group)')
r.r('names(group) = genes')
r.r('pwf = nullp(group, "hg19", "ensGene")')
if length_correct:
r.r('wall = goseq(pwf, "hg19", "ensGene")')
else:
r.r('wall = goseq(pwf, "hg19", "ensGene", method="Hypergeometric")')
r.r('t = as.data.frame(wall)')
t = r.globalenv['t']
go_results = pd.DataFrame(columns=list(t.colnames))
for i, c in enumerate(go_results.columns):
go_results[c] = list(t[i])
r, c, ask, abf = smm.multipletests(
go_results.over_represented_pvalue, alpha=0.05, method='fdr_i')
go_results['over_represented_pvalue_bh'] = c
r, c, ask, abf = smm.multipletests(
go_results.under_represented_pvalue, alpha=0.05, method='fdr_i')
go_results['under_represented_pvalue_bh'] = c
go_results.index = go_results.category
go_results = go_results.drop('category', axis=1)
if plot_fn and os.path.exists('Rplots.pdf'):
from os import rename
rename('Rplots.pdf', plot_fn)
elif os.path.exists('Rplots.pdf'):
from os import remove
remove('Rplots.pdf')
return go_results
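# Illustrative sketch for goseq_gene_enrichment (requires rpy2 and the goseq R package);
# `all_genes` and `de_genes` are assumed inputs, not names defined in this module:
#
#     sig = [g in de_genes for g in all_genes]
#     go_results = goseq_gene_enrichment(all_genes, sig, plot_fn='goseq_length_bias.pdf')
#     go_results[go_results.over_represented_pvalue_bh < 0.05]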
def categories_to_colors(cats, colormap=None):
"""
Map categorical data to colors.
Parameters
----------
cats : pandas.Series or list
Categorical data as a list or in a Series.
colormap : list
List of RGB triples. If not provided, the tableau20 colormap defined in
this module will be used.
Returns
-------
legend : pd.Series
Series whose values are colors and whose index are the original
categories that correspond to those colors.
"""
if colormap is None:
colormap = tableau20
if type(cats) != pd.Series:
cats = pd.Series(cats)
legend = pd.Series(dict(list(zip(set(cats), colormap))))
# colors = pd.Series([legend[x] for x in cats.values], index=cats.index)
# I've removed this output:
# colors : pd.Series
# Series whose values are the colors for each category. If cats was a
# Series, then out will have the same index as cats.
return(legend)
def plot_color_legend(legend, horizontal=False, ax=None):
"""
Plot a pandas Series with labels and colors.
Parameters
----------
legend : pandas.Series
Pandas Series whose values are RGB triples and whose index contains
categorical labels.
horizontal : bool
If True, plot horizontally.
ax : matplotlib.axis
Axis to plot on.
Returns
-------
ax : matplotlib.axis
Plot axis.
"""
import matplotlib.pyplot as plt
import numpy as np
t = np.array([np.array([x for x in legend])])
if ax is None:
fig, ax = plt.subplots(1, 1)
if horizontal:
ax.imshow(t, interpolation='none')
ax.set_yticks([])
ax.set_xticks(np.arange(0, legend.shape[0]))
t = ax.set_xticklabels(legend.index)
else:
t = t.reshape([legend.shape[0], 1, 3])
ax.imshow(t, interpolation='none')
ax.set_xticks([])
ax.set_yticks(np.arange(0, legend.shape[0]))
t = ax.set_yticklabels(legend.index)
return ax
def make_color_legend_rects(colors, labels=None):
"""
Make list of rectangles and labels for making legends.
Parameters
----------
colors : pandas.Series or list
Pandas series whose values are colors and index is labels.
Alternatively, you can provide a list with colors and provide the labels
as a list.
labels : list
If colors is a list, this should be the list of corresponding labels.
Returns
-------
out : pd.Series
Pandas series whose values are matplotlib rectangles and whose index are
the legend labels for those rectangles. You can add each of these
rectangles to your axis using ax.add_patch(r) for r in out then create a
legend whose labels are out.values and whose labels are
legend_rects.index:
for r in legend_rects:
ax.add_patch(r)
lgd = ax.legend(legend_rects.values, labels=legend_rects.index)
"""
from matplotlib.pyplot import Rectangle
if labels:
d = dict(list(zip(labels, colors)))
se = pd.Series(d)
else:
se = colors
rects = []
for i in se.index:
r = Rectangle((0, 0), 0, 0, fc=se[i])
rects.append(r)
out = pd.Series(rects, index=se.index)
return out
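# Illustrative sketch tying together the three color-legend helpers above; the tissue
# labels are made up:
#
#     cats = pd.Series(['lung', 'heart', 'lung', 'liver'])
#     legend = categories_to_colors(cats)
#     plot_color_legend(legend)
#     rects = make_color_legend_rects(legend)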
class SVD:
def __init__(self, df, mean_center=True, scale_variance=False, full_matrices=False):
"""
Perform SVD for data matrix using scipy.linalg.svd. Note that this is currently inefficient
for large matrices due to some of the pandas operations.
Parameters
----------
df : pandas.DataFrame
Pandas data frame with data.
mean_center : bool
If True, mean center the rows. This should be done if not already
done.
scale_variance : bool
If True, scale the variance of each row to be one. Combined with
mean centering, this will transform your data into z-scores.
full_matrices : bool
Passed to scipy.linalg.svd. If True, U and Vh are of shape (M, M), (N, N). If False, the
shapes are (M, K) and (K, N), where K = min(M, N).
"""
import copy
self.data_orig = copy.deepcopy(df)
self.data = copy.deepcopy(df)
if mean_center:
self.data = (self.data.T - self.data.mean(axis=1)).T
if scale_variance:
self.data = (self.data.T / self.data.std(axis=1)).T
self._perform_svd(full_matrices)
def _perform_svd(self, full_matrices):
from scipy.linalg import svd
u, s, vh = svd(self.data, full_matrices=full_matrices)
self.u_orig = u
self.s_orig = s
self.vh_orig = vh
self.u = pd.DataFrame(
u,
index=self.data.index,
columns=['PC{}'.format(x) for x in range(1, u.shape[1] + 1)],
)
self.v = pd.DataFrame(
vh.T,
index=self.data.columns,
columns=['PC{}'.format(x) for x in range(1, vh.shape[0] + 1)],
)
index = ['PC{}'.format(x) for x in range(1, len(s) + 1)]
self.s_norm = pd.Series(s / s.sum(), index=index)
def plot_variance_explained(self, cumulative=False, xtick_start=1,
xtick_spacing=1, num_pc=None):
"""
Plot amount of variance explained by each principal component.
Parameters
----------
num_pc : int
Number of principal components to plot. If None, plot all.
cumulative : bool
If True, include cumulative variance.
xtick_start : int
The first principal component to label on the x-axis.
xtick_spacing : int
The spacing between labels on the x-axis.
"""
import matplotlib.pyplot as plt
from numpy import arange
if num_pc:
s_norm = self.s_norm[0:num_pc]
else:
s_norm = self.s_norm
if cumulative:
s_cumsum = s_norm.cumsum()
plt.bar(list(range(s_cumsum.shape[0])), s_cumsum.values,
label='Cumulative', color=(0.17254901960784313,
0.6274509803921569,
0.17254901960784313))
plt.bar(list(range(s_norm.shape[0])), s_norm.values, label='Per PC',
color=(0.12156862745098039, 0.4666666666666667,
0.7058823529411765))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylabel('Variance')
else:
plt.bar(list(range(s_norm.shape[0])), s_norm.values,
color=(0.12156862745098039, 0.4666666666666667,
0.7058823529411765))
plt.ylabel('Proportion variance explained')
plt.xlabel('PC')
plt.xlim(0, s_norm.shape[0])
tick_locs = arange(xtick_start - 1, s_norm.shape[0],
step=xtick_spacing)
# 0.8 is the width of the bars.
tick_locs = tick_locs + 0.4
plt.xticks(tick_locs,
arange(xtick_start, s_norm.shape[0] + 1, xtick_spacing))
def plot_pc_scatter(self, pc1, pc2, v=True, subset=None, ax=None,
color=None, s=None, marker=None, color_name=None,
s_name=None, marker_name=None):
"""
Make a scatter plot of two principal components. You can create
differently colored, sized, or marked scatter points.
Parameters
----------
pc1 : str
String of form PCX where X is the number of the principal component
you want to plot on the x-axis.
pc2 : str
String of form PCX where X is the number of the principal component
you want to plot on the y-axis.
v : bool
If True, use the v matrix for plotting the principal components
(typical if input data was genes as rows and samples as columns).
If False, use the u matrix.
subset : list
Make the scatter plot using only a subset of the rows of u or v.
ax : matplotlib.axes
Plot the scatter plot on this axis.
color : pandas.Series
Pandas series containing a categorical variable to color the scatter
points.
s : pandas.Series
Pandas series containing a categorical variable to size the scatter
points. Currently limited to 7 distinct values (sizes).
marker : pandas.Series
Pandas series containing a categorical variable to choose the marker
type for the scatter points. Currently limited to 21 distinct values
(marker styles).
color_name : str
Name for the color legend if a categorical variable for color is
provided.
s_name : str
Name for the size legend if a categorical variable for size is
provided.
marker_name : str
Name for the marker legend if a categorical variable for marker type
is provided.
Returns
-------
ax : matplotlib.axes._subplots.AxesSubplot
Scatter plot axis.
TODO: Add ability to label points.
"""
import matplotlib.pyplot as plt
import seaborn as sns
if s is not None:
assert len(set(s)) <= 7, 'Error: too many values for "s"'
if v:
df = self.v
else:
df = self.u
if color is not None:
if color.unique().shape[0] <= 10:
colormap = pd.Series(dict(list(zip(set(color.values),
tableau20[0:2 * len(set(color)):2]))))
else:
colormap = pd.Series(dict(list(zip(set(color.values),
sns.color_palette('husl', len(set(color)))))))
color = pd.Series([colormap[x] for x in color.values],
index=color.index)
color_legend = True
if not color_name:
color_name = color.index.name
else:
color = pd.Series([tableau20[0]] * df.shape[0], index=df.index)
color_legend = False
if s is not None:
smap = pd.Series(dict(list(zip(
set(s.values), list(range(30, 351))[0::50][0:len(set(s)) + 1]))))
s = pd.Series([smap[x] for x in s.values],
index=s.index)
s_legend = True
if not s_name:
s_name = s.index.name
else:
s = pd.Series(30, index=df.index)
s_legend = False
markers = ['o', '*', 's', 'v', '+', 'x', 'd',
'p', '2', '<', '|', '>', '_', 'h',
'1', '2', '3', '4', '8', '^', 'D']
if marker is not None:
markermap = pd.Series(dict(list(zip(set(marker.values), markers))))
marker = pd.Series([markermap[x] for x in marker.values],
index=marker.index)
marker_legend = True
if not marker_name:
marker_name = marker.index.name
else:
marker = pd.Series('o', index=df.index)
marker_legend = False
if ax is None:
fig, ax = plt.subplots(1, 1)
else:
fig = ax.get_figure()
for m in set(marker.values):
mse = marker[marker == m]
cse = color[mse.index]
sse = s[mse.index]
ax.scatter(df.ix[mse.index, pc1], df.ix[mse.index, pc2],
s=sse.values, color=list(cse.values), marker=m,
alpha=0.8)
ax.set_title('{} vs. {}'.format(pc1, pc2))
ax.set_xlabel(pc1)
ax.set_ylabel(pc2)
if color_legend:
legend_rects = make_color_legend_rects(colormap)
for r in legend_rects:
ax.add_patch(r)
lgd = ax.legend(legend_rects.values, labels=legend_rects.index,
title=color_name,
loc='upper left',
bbox_to_anchor=(1, 1))
if s_legend:
if lgd:
lgd = ax.add_artist(lgd)
xa, xb = ax.get_xlim()
ya, yb = ax.get_ylim()
for i in smap.index:
ax.scatter([xb + 1], [yb + 1], marker='o',
s=smap[i], color='black', label=i)
lgd = ax.legend(title=s_name, loc='center left',
bbox_to_anchor=(1, 0.5))
ax.set_xlim(xa, xb)
ax.set_ylim(ya, yb)
if marker_legend:
if lgd:
lgd = ax.add_artist(lgd)
xa, xb = ax.get_xlim()
ya, yb = ax.get_ylim()
for i in markermap.index:
t = ax.scatter([xb + 1], [yb + 1], marker=markermap[i],
s=sse.min(), color='black', label=i)
handles, labels = ax.get_legend_handles_labels()
if s_legend:
handles = handles[len(smap):]
labels = labels[len(smap):]
lgd = ax.legend(handles, labels, title=marker_name,
loc='lower left', bbox_to_anchor=(1, 0))
ax.set_xlim(xa, xb)
ax.set_ylim(ya, yb)
# fig.tight_layout()
return fig, ax
def pc_correlation(self, covariates, num_pc=5):
"""
Calculate the correlation between the first num_pc prinicipal components
and known covariates. The size and index of covariates determines
whether u or v is used.
Parameters
----------
covariates : pandas.DataFrame
Dataframe of covariates whose index corresponds to the index of
either u or v.
num_pc : int
Number of principal components to correlate with.
Returns
-------
corr : pandas.Panel
Panel with correlation values and p-values.
"""
from scipy.stats import spearmanr
if (covariates.shape[0] == self.u.shape[0] and
len(set(covariates.index) & set(self.u.index)) == self.u.shape[0]):
mat = self.u
elif (covariates.shape[0] == self.v.shape[0] and
len(set(covariates.index) & set(self.v.index)) == self.v.shape[0]):
mat = self.v
else:
import sys
sys.stderr.write('Covariates differ in size from input data.\n')
sys.exit(1)
corr = pd.Panel(items=['rho', 'pvalue'],
major_axis=covariates.columns,
minor_axis=mat.columns[0:num_pc])
for i in corr.major_axis:
for j in corr.minor_axis:
rho, p = spearmanr(covariates[i], mat[j])
corr.ix['rho', i, j] = rho
corr.ix['pvalue', i, j] = p
return corr
def pc_anova(self, covariates, num_pc=5):
"""
Calculate one-way ANOVA between the first num_pc prinicipal components
and known covariates. The size and index of covariates determines
whether u or v is used.
Parameters
----------
covariates : pandas.DataFrame
Dataframe of covariates whose index corresponds to the index of
either u or v.
num_pc : int
Number of principal components to correlate with.
Returns
-------
anova : pandas.Panel
Panel with F-values and p-values.
"""
from scipy.stats import f_oneway
if (covariates.shape[0] == self.u.shape[0] and
len(set(covariates.index) & set(self.u.index)) == self.u.shape[0]):
mat = self.u
elif (covariates.shape[0] == self.v.shape[0] and
len(set(covariates.index) & set(self.v.index)) == self.v.shape[0]):
mat = self.v
else:
raise ValueError('Covariates differ in size from input data.')
anova = pd.Panel(items=['fvalue', 'pvalue'],
major_axis=covariates.columns,
minor_axis=mat.columns[0:num_pc])
for i in anova.major_axis:
for j in anova.minor_axis:
t = [mat[j][covariates[i] == x] for x in set(covariates[i])]
f, p = f_oneway(*t)
anova.ix['fvalue', i, j] = f
anova.ix['pvalue', i, j] = p
return anova
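# Illustrative sketch for the SVD class above; `exp` (genes x samples) and `covariates`
# (indexed by sample) are assumed inputs:
#
#     svd = SVD(exp, mean_center=True, scale_variance=True)
#     svd.plot_variance_explained(cumulative=True)
#     fig, ax = svd.plot_pc_scatter('PC1', 'PC2', color=covariates['sex'])
#     corr = svd.pc_correlation(covariates, num_pc=5)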
def manhattan_plot(
res,
ax,
p_filter=1,
p_cutoff=None,
marker_size=10,
font_size=8,
chrom_labels=list(range(1, 23))[0::2],
label_column=None,
category_order=None,
legend=True,
):
"""
Make a Manhattan plot for GWAS results. Currently only autosomes are supported.
Parameters
----------
res : pandas.DataFrame
GWAS results. The following columns are required - chrom (chromosome,
int), pos (genomic position, int), P (GWAS p-value, float).
ax : matplotlib.axis
Matplotlib axis to make Manhattan plot on.
p_filter : float
Only plot p-values smaller than this cutoff. This is useful for testing
because filtering on p-values speeds up the plotting.
p_cutoff : float
Plot horizontal line at this p-value.
marker_size : int
Size of Manhattan markers.
font_size : int
Font size for plots.
chrom_labels : list
List of ints indicating which chromosomes to label. You may want to
adjust this based on the size of the plot. Currently only integers
1-22 are supported.
label_column : str
String with column name from res. This column should contain a
categorical annotation for each variant. These will be indicated by
colors.
category_order : list
If label_column is not None, you can provide a list of the categories
that are contained in the label_column. This will be used to assign the
color palette and will specify the z-order of the categories.
legend : boolean
If True and label_column is not None, plot a legend.
Returns
-------
res : pandas.Dataframe
GWAS results. The results will have additional columns that were used
for plotting.
ax : matplotlib.axis
Axis with the Manhattan plot.
colors : pd.Series or None
If label_column is None, this will be None. Otherwise, if a label_column
is specified, this will be a series with a mapping between the labels
and the colors for each label.
"""
# TODO: It might make sense to allow a variable that specifies the z-order
# of labels in label_column. If there are many labels and points in the same
# place, certain annotations will be preferentially shown.
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# Filter results based on p-value.
if p_filter < 1:
res = res[res['P'] < p_filter]
# Assign x coordinates for each association.
res['xpos'] = np.nan
chrom_vc = res['chrom'].value_counts()
# total_length is arbitrary, but it's a little easier than working with the
# normalized chromosome sizes to avoid small numbers.
total_length = 1000
right = chrom_sizes_norm.cumsum()
right = right / right[22] * total_length
left = chrom_sizes_norm.cumsum() - chrom_sizes_norm[1]
left = pd.Series(0, list(range(1, 23)))
left[1:23] = right[0:21].values
for chrom in range(1, 23):
if chrom in res['chrom'].values:
res.loc[res['chrom'] == chrom, 'xpos'] = np.linspace(
left[chrom], right[chrom], chrom_vc[chrom])
# Assign colors.
grey = mpl.colors.to_rgb('grey')
light_grey = (0.9, 0.9, 0.9)
middle_grey = (0.8, 0.8, 0.8)
# I first set everything to black, but in the end everything should be
# changed to one of the greys (or other colors if there is an annotation
# column). If there are black points on the plot, that indicates a problem.
res['color'] = 'black'
for chrom in range(1, 23)[0::2]:
if chrom in res['chrom'].values:
ind = res[res.chrom == chrom].index
res.loc[ind, 'color'] = pd.Series([grey for x in ind], index=ind)
for chrom in range(1, 23)[1::2]:
if chrom in res['chrom'].values:
ind = res[res.chrom == chrom].index
res.loc[ind, 'color'] = pd.Series([middle_grey for x in ind], index=ind)
if label_column is not None:
if category_order is not None:
assert set(category_order) == set(res[label_column].dropna())
categories = category_order
else:
categories = list(set(res[label_column].dropna()))
colors = categories_to_colors(
categories,
colormap=sns.color_palette('colorblind'),
)
for cat in categories:
ind = res[res[label_column] == cat].index
res.loc[ind, 'color'] = pd.Series([colors[cat] for x in ind],
index=ind)
# Plot
if label_column is not None:
ind = res[res[label_column].isnull()].index
ax.scatter(
res.loc[ind, 'xpos'],
-np.log10(res.loc[ind, 'P']),
color=res.loc[ind, 'color'],
s=marker_size,
alpha=0.75,
rasterized=True,
label=None,
)
for cat in reversed(categories):
ind = res[res[label_column] == cat].index
ax.scatter(
res.loc[ind, 'xpos'],
-np.log10(res.loc[ind, 'P']),
color=res.loc[ind, 'color'],
s=marker_size,
alpha=0.75,
rasterized=True,
label=None,
)
else:
ax.scatter(
res['xpos'],
-np.log10(res['P']),
color=res['color'],
s=marker_size,
alpha=0.75,
rasterized=True,
label=None,
)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
ax.grid(axis='x')
ax.grid(axis='y')
ax.grid(axis='y', alpha=0.5, ls='-', lw=0.6)
if p_cutoff is not None:
ax.hlines(
-np.log10(p_cutoff),
-5,
total_length + 5,
color='red',
linestyles='--',
lw=0.8,
alpha=0.5,
)
# These next two lines add background shading. I may add back in as option.
# for chrom in range(1, 23)[0::2]:
# ax.axvspan(left[chrom], right[chrom], facecolor=(0.4, 0.4, 0.4), alpha=0.2, lw=0)
ax.set_xlim(-5, total_length + 5)
ax.set_ylim(0, ymax)
# Set chromosome labels
# ind = range(1, 23)[0::2]
# if skip19:
# ind = [x for x in ind if x != 19]
ind = [x for x in chrom_labels if x in range(1, 23)]
ax.set_xticks(left[ind] + (right[ind] - left[ind]) / 2)
ax.set_xticklabels(ind, fontsize=font_size)
ax.set_ylabel(r'$-\log_{10} p$ value', fontsize=font_size)
for t in ax.get_xticklabels() + ax.get_yticklabels():
t.set_fontsize(font_size)
if label_column is not None and legend:
for cat in categories:
ax.scatter(
-100,
-100,
s=marker_size,
color=colors[cat],
label=cat,
)
if legend:
ax.legend(
fontsize=font_size- 1,
framealpha=0.5,
frameon=True,
facecolor='white',
)
# TODO: eventually, it would be better to be smarter about the x-axis
# limits. Depending on the size of the markers and plot, some of the markers
# might be cut off.
ax.set_xlim(-5, total_length + 5)
# TODO: eventually, it would be better to be smarter about the y-axis
# limits. Depending on the size of the markers and plot, some of the markers
# might be cut off. Matplotlib doesn't know anything about the size of the
# markers, so it might set the y-limit too low.
ax.set_ylim(-1 * np.log10(p_filter), ymax)
if label_column is None:
colors = None
return(res, ax, colors)
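# Illustrative sketch for manhattan_plot; `gwas_res` (with chrom, pos and P columns)
# is an assumed dataframe:
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots(figsize=(8, 3))
#     gwas_res, ax, colors = manhattan_plot(gwas_res, ax, p_filter=0.01, p_cutoff=5e-8)
#     fig.savefig('manhattan.png', dpi=300)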
|
py | 1a523745513e09da76ee0697bf6bb9d2150b9deb | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: GPL-3.0
#
# GNU Radio Python Flow Graph
# Title: NOAA APT Decoder
# Author: Manolis Surligas, George Vardakis
# Description: A NOAA APT Decoder with automatic image synchronization
# GNU Radio version: 3.8.1.0
from gnuradio import analog
from gnuradio import blocks
from gnuradio import filter
from gnuradio.filter import firdes
from gnuradio import gr
import sys
import signal
from argparse import ArgumentParser
from gnuradio.eng_arg import eng_float, intx
from gnuradio import eng_notation
from gnuradio.filter import pfb
import satnogs
import soapy
import distutils
from distutils import util
class satnogs_noaa_apt_decoder(gr.top_block):
def __init__(self, antenna="RX", bb_freq=0.0, bw=0.0, dc_removal="False", decoded_data_file_path="/tmp/.satnogs/data/data", dev_args="", doppler_correction_per_sec=20, enable_iq_dump=0, file_path="test.wav", flip_images=0, gain=0.0, gain_mode="Overall", iq_file_path="/tmp/iq.dat", lo_offset=100e3, other_settings="", ppm=0, rigctl_port=4532, rx_freq=100e6, samp_rate_rx=2048000, soapy_rx_device="driver=rtlsdr", stream_args="", sync=1, tune_args="", udp_IP="127.0.0.1", udp_dump_host="", udp_dump_port=57356, udp_port=16887, waterfall_file_path="/tmp/waterfall.dat"):
gr.top_block.__init__(self, "NOAA APT Decoder")
##################################################
# Parameters
##################################################
self.antenna = antenna
self.bb_freq = bb_freq
self.bw = bw
self.dc_removal = dc_removal
self.decoded_data_file_path = decoded_data_file_path
self.dev_args = dev_args
self.doppler_correction_per_sec = doppler_correction_per_sec
self.enable_iq_dump = enable_iq_dump
self.file_path = file_path
self.flip_images = flip_images
self.gain = gain
self.gain_mode = gain_mode
self.iq_file_path = iq_file_path
self.lo_offset = lo_offset
self.other_settings = other_settings
self.ppm = ppm
self.rigctl_port = rigctl_port
self.rx_freq = rx_freq
self.samp_rate_rx = samp_rate_rx
self.soapy_rx_device = soapy_rx_device
self.stream_args = stream_args
self.sync = sync
self.tune_args = tune_args
self.udp_IP = udp_IP
self.udp_dump_host = udp_dump_host
self.udp_dump_port = udp_dump_port
self.udp_port = udp_port
self.waterfall_file_path = waterfall_file_path
##################################################
# Variables
##################################################
self.audio_samp_rate = audio_samp_rate = 48000
##################################################
# Blocks
##################################################
self.soapy_source_0_0 = None
# Make sure that the gain mode is valid
if(gain_mode not in ['Overall', 'Specific', 'Settings Field']):
raise ValueError("Wrong gain mode on channel 0. Allowed gain modes: "
"['Overall', 'Specific', 'Settings Field']")
dev = soapy_rx_device
# Stream arguments for every activated stream
tune_args = [tune_args]
settings = [other_settings]
# Setup the device arguments
dev_args = dev_args
self.soapy_source_0_0 = soapy.source(1, dev, dev_args, stream_args,
tune_args, settings, samp_rate_rx, "fc32")
self.soapy_source_0_0.set_dc_removal(0,bool(distutils.util.strtobool(dc_removal)))
# Set up DC offset. If set to (0, 0) internally the source block
# will handle the case if no DC offset correction is supported
self.soapy_source_0_0.set_dc_offset(0,0)
# Setup IQ Balance. If set to (0, 0) internally the source block
# will handle the case if no IQ balance correction is supported
self.soapy_source_0_0.set_iq_balance(0,0)
self.soapy_source_0_0.set_agc(0,False)
# generic frequency setting should be specified first
self.soapy_source_0_0.set_frequency(0, rx_freq - lo_offset)
self.soapy_source_0_0.set_frequency(0,"BB",bb_freq)
# Setup Frequency correction. If set to 0 internally the source block
# will handle the case if no frequency correction is supported
self.soapy_source_0_0.set_frequency_correction(0,ppm)
self.soapy_source_0_0.set_antenna(0,antenna)
self.soapy_source_0_0.set_bandwidth(0,bw)
if(gain_mode != 'Settings Field'):
# pass is needed, in case the template does not evaluate anything
pass
self.soapy_source_0_0.set_gain(0,gain)
self.satnogs_waterfall_sink_0_0 = satnogs.waterfall_sink(4*4160*4, rx_freq, 10, 1024, waterfall_file_path, 1)
self.satnogs_ogg_encoder_0 = satnogs.ogg_encoder(file_path, audio_samp_rate, 0.8)
self.satnogs_noaa_apt_sink_1 = satnogs.noaa_apt_sink(decoded_data_file_path, 2080, 1800, True, False)
self.satnogs_iq_sink_0_0 = satnogs.iq_sink(16768, iq_file_path, False, enable_iq_dump)
self.satnogs_doppler_compensation_0 = satnogs.doppler_compensation(samp_rate_rx, rx_freq, lo_offset, 4*4160*4, 1, 0)
self.rational_resampler_xxx_0_0 = filter.rational_resampler_fff(
interpolation=1,
decimation=4,
taps=None,
fractional_bw=None)
self.pfb_arb_resampler_xxx_0 = pfb.arb_resampler_fff(
audio_samp_rate / (4*4160*4),
taps=None,
flt_size=32)
self.pfb_arb_resampler_xxx_0.declare_sample_delay(0)
self.low_pass_filter_0_0 = filter.fir_filter_ccf(
1,
firdes.low_pass(
1,
4*4160*4,
4*4160*1.1,
1e3,
firdes.WIN_HAMMING,
6.76))
self.hilbert_fc_0 = filter.hilbert_fc(65, firdes.WIN_HAMMING, 6.76)
self.blocks_udp_sink_0_0 = blocks.udp_sink(gr.sizeof_gr_complex*1, udp_dump_host, udp_dump_port, 1472, True)
self.blocks_complex_to_mag_0 = blocks.complex_to_mag(1)
self.band_pass_filter_0 = filter.fir_filter_fff(
4,
firdes.band_pass(
1,
(4*4160*4 ),
500,
4.2e3,
200,
firdes.WIN_HAMMING,
6.76))
self.analog_wfm_rcv_0 = analog.wfm_rcv(
quad_rate=4*4160*4,
audio_decimation=1,
)
##################################################
# Connections
##################################################
self.connect((self.analog_wfm_rcv_0, 0), (self.band_pass_filter_0, 0))
self.connect((self.analog_wfm_rcv_0, 0), (self.pfb_arb_resampler_xxx_0, 0))
self.connect((self.band_pass_filter_0, 0), (self.hilbert_fc_0, 0))
self.connect((self.blocks_complex_to_mag_0, 0), (self.rational_resampler_xxx_0_0, 0))
self.connect((self.hilbert_fc_0, 0), (self.blocks_complex_to_mag_0, 0))
self.connect((self.low_pass_filter_0_0, 0), (self.analog_wfm_rcv_0, 0))
self.connect((self.pfb_arb_resampler_xxx_0, 0), (self.satnogs_ogg_encoder_0, 0))
self.connect((self.rational_resampler_xxx_0_0, 0), (self.satnogs_noaa_apt_sink_1, 0))
self.connect((self.satnogs_doppler_compensation_0, 0), (self.blocks_udp_sink_0_0, 0))
self.connect((self.satnogs_doppler_compensation_0, 0), (self.low_pass_filter_0_0, 0))
self.connect((self.satnogs_doppler_compensation_0, 0), (self.satnogs_iq_sink_0_0, 0))
self.connect((self.satnogs_doppler_compensation_0, 0), (self.satnogs_waterfall_sink_0_0, 0))
self.connect((self.soapy_source_0_0, 0), (self.satnogs_doppler_compensation_0, 0))
def get_antenna(self):
return self.antenna
def set_antenna(self, antenna):
self.antenna = antenna
self.soapy_source_0_0.set_antenna(0,self.antenna)
def get_bb_freq(self):
return self.bb_freq
def set_bb_freq(self, bb_freq):
self.bb_freq = bb_freq
self.soapy_source_0_0.set_frequency(0,"BB",self.bb_freq)
def get_bw(self):
return self.bw
def set_bw(self, bw):
self.bw = bw
self.soapy_source_0_0.set_bandwidth(0,self.bw)
def get_dc_removal(self):
return self.dc_removal
def set_dc_removal(self, dc_removal):
self.dc_removal = dc_removal
self.soapy_source_0_0.set_dc_removal(0,bool(distutils.util.strtobool(self.dc_removal)))
def get_decoded_data_file_path(self):
return self.decoded_data_file_path
def set_decoded_data_file_path(self, decoded_data_file_path):
self.decoded_data_file_path = decoded_data_file_path
def get_dev_args(self):
return self.dev_args
def set_dev_args(self, dev_args):
self.dev_args = dev_args
def get_doppler_correction_per_sec(self):
return self.doppler_correction_per_sec
def set_doppler_correction_per_sec(self, doppler_correction_per_sec):
self.doppler_correction_per_sec = doppler_correction_per_sec
def get_enable_iq_dump(self):
return self.enable_iq_dump
def set_enable_iq_dump(self, enable_iq_dump):
self.enable_iq_dump = enable_iq_dump
def get_file_path(self):
return self.file_path
def set_file_path(self, file_path):
self.file_path = file_path
def get_flip_images(self):
return self.flip_images
def set_flip_images(self, flip_images):
self.flip_images = flip_images
def get_gain(self):
return self.gain
def set_gain(self, gain):
self.gain = gain
self.soapy_source_0_0.set_gain(0, self.gain)
def get_gain_mode(self):
return self.gain_mode
def set_gain_mode(self, gain_mode):
self.gain_mode = gain_mode
def get_iq_file_path(self):
return self.iq_file_path
def set_iq_file_path(self, iq_file_path):
self.iq_file_path = iq_file_path
def get_lo_offset(self):
return self.lo_offset
def set_lo_offset(self, lo_offset):
self.lo_offset = lo_offset
self.soapy_source_0_0.set_frequency(0, self.rx_freq - self.lo_offset)
def get_other_settings(self):
return self.other_settings
def set_other_settings(self, other_settings):
self.other_settings = other_settings
def get_ppm(self):
return self.ppm
def set_ppm(self, ppm):
self.ppm = ppm
self.soapy_source_0_0.set_frequency_correction(0,self.ppm)
def get_rigctl_port(self):
return self.rigctl_port
def set_rigctl_port(self, rigctl_port):
self.rigctl_port = rigctl_port
def get_rx_freq(self):
return self.rx_freq
def set_rx_freq(self, rx_freq):
self.rx_freq = rx_freq
self.soapy_source_0_0.set_frequency(0, self.rx_freq - self.lo_offset)
def get_samp_rate_rx(self):
return self.samp_rate_rx
def set_samp_rate_rx(self, samp_rate_rx):
self.samp_rate_rx = samp_rate_rx
def get_soapy_rx_device(self):
return self.soapy_rx_device
def set_soapy_rx_device(self, soapy_rx_device):
self.soapy_rx_device = soapy_rx_device
def get_stream_args(self):
return self.stream_args
def set_stream_args(self, stream_args):
self.stream_args = stream_args
def get_sync(self):
return self.sync
def set_sync(self, sync):
self.sync = sync
def get_tune_args(self):
return self.tune_args
def set_tune_args(self, tune_args):
self.tune_args = tune_args
def get_udp_IP(self):
return self.udp_IP
def set_udp_IP(self, udp_IP):
self.udp_IP = udp_IP
def get_udp_dump_host(self):
return self.udp_dump_host
def set_udp_dump_host(self, udp_dump_host):
self.udp_dump_host = udp_dump_host
def get_udp_dump_port(self):
return self.udp_dump_port
def set_udp_dump_port(self, udp_dump_port):
self.udp_dump_port = udp_dump_port
def get_udp_port(self):
return self.udp_port
def set_udp_port(self, udp_port):
self.udp_port = udp_port
def get_waterfall_file_path(self):
return self.waterfall_file_path
def set_waterfall_file_path(self, waterfall_file_path):
self.waterfall_file_path = waterfall_file_path
def get_audio_samp_rate(self):
return self.audio_samp_rate
def set_audio_samp_rate(self, audio_samp_rate):
self.audio_samp_rate = audio_samp_rate
self.pfb_arb_resampler_xxx_0.set_rate(self.audio_samp_rate / (4*4160*4))
def argument_parser():
description = 'A NOAA APT Decoder with automatic image synchronization'
parser = ArgumentParser(description=description)
parser.add_argument(
"--antenna", dest="antenna", type=str, default="RX",
help="Set antenna [default=%(default)r]")
parser.add_argument(
"--bb-freq", dest="bb_freq", type=eng_float, default="0.0",
help="Set Baseband CORDIC frequency (if the device supports it) [default=%(default)r]")
parser.add_argument(
"--bw", dest="bw", type=eng_float, default="0.0",
help="Set Bandwidth [default=%(default)r]")
parser.add_argument(
"--dc-removal", dest="dc_removal", type=str, default="False",
help="Set Remove automatically the DC offset (if the device support it) [default=%(default)r]")
parser.add_argument(
"--decoded-data-file-path", dest="decoded_data_file_path", type=str, default="/tmp/.satnogs/data/data",
help="Set decoded_data_file_path [default=%(default)r]")
parser.add_argument(
"--dev-args", dest="dev_args", type=str, default="",
help="Set Device arguments [default=%(default)r]")
parser.add_argument(
"--doppler-correction-per-sec", dest="doppler_correction_per_sec", type=intx, default=20,
help="Set doppler_correction_per_sec [default=%(default)r]")
parser.add_argument(
"--enable-iq-dump", dest="enable_iq_dump", type=intx, default=0,
help="Set enable_iq_dump [default=%(default)r]")
parser.add_argument(
"--file-path", dest="file_path", type=str, default="test.wav",
help="Set file_path [default=%(default)r]")
parser.add_argument(
"--flip-images", dest="flip_images", type=intx, default=0,
help="Set flip_images [default=%(default)r]")
parser.add_argument(
"--gain", dest="gain", type=eng_float, default="0.0",
help="Set gain [default=%(default)r]")
parser.add_argument(
"--gain-mode", dest="gain_mode", type=str, default="Overall",
help="Set gain_mode [default=%(default)r]")
parser.add_argument(
"--iq-file-path", dest="iq_file_path", type=str, default="/tmp/iq.dat",
help="Set iq_file_path [default=%(default)r]")
parser.add_argument(
"--lo-offset", dest="lo_offset", type=eng_float, default="100.0k",
help="Set lo_offset [default=%(default)r]")
parser.add_argument(
"--other-settings", dest="other_settings", type=str, default="",
help="Set Soapy Channel other settings [default=%(default)r]")
parser.add_argument(
"--ppm", dest="ppm", type=eng_float, default="0.0",
help="Set ppm [default=%(default)r]")
parser.add_argument(
"--rigctl-port", dest="rigctl_port", type=intx, default=4532,
help="Set rigctl_port [default=%(default)r]")
parser.add_argument(
"--rx-freq", dest="rx_freq", type=eng_float, default="100.0M",
help="Set rx_freq [default=%(default)r]")
parser.add_argument(
"--samp-rate-rx", dest="samp_rate_rx", type=eng_float, default="2.048M",
help="Set Device Sampling rate [default=%(default)r]")
parser.add_argument(
"--soapy-rx-device", dest="soapy_rx_device", type=str, default="driver=rtlsdr",
help="Set soapy_rx_device [default=%(default)r]")
parser.add_argument(
"--stream-args", dest="stream_args", type=str, default="",
help="Set Soapy Stream arguments [default=%(default)r]")
parser.add_argument(
"--sync", dest="sync", type=intx, default=1,
help="Set sync [default=%(default)r]")
parser.add_argument(
"--tune-args", dest="tune_args", type=str, default="",
help="Set Soapy Channel Tune arguments [default=%(default)r]")
parser.add_argument(
"--udp-IP", dest="udp_IP", type=str, default="127.0.0.1",
help="Set udp_IP [default=%(default)r]")
parser.add_argument(
"--udp-dump-host", dest="udp_dump_host", type=str, default="",
help="Set udp_dump_host [default=%(default)r]")
parser.add_argument(
"--udp-dump-port", dest="udp_dump_port", type=intx, default=57356,
help="Set udp_dump_port [default=%(default)r]")
parser.add_argument(
"--udp-port", dest="udp_port", type=intx, default=16887,
help="Set udp_port [default=%(default)r]")
parser.add_argument(
"--waterfall-file-path", dest="waterfall_file_path", type=str, default="/tmp/waterfall.dat",
help="Set waterfall_file_path [default=%(default)r]")
return parser
def main(top_block_cls=satnogs_noaa_apt_decoder, options=None):
if options is None:
options = argument_parser().parse_args()
tb = top_block_cls(antenna=options.antenna, bb_freq=options.bb_freq, bw=options.bw, dc_removal=options.dc_removal, decoded_data_file_path=options.decoded_data_file_path, dev_args=options.dev_args, doppler_correction_per_sec=options.doppler_correction_per_sec, enable_iq_dump=options.enable_iq_dump, file_path=options.file_path, flip_images=options.flip_images, gain=options.gain, gain_mode=options.gain_mode, iq_file_path=options.iq_file_path, lo_offset=options.lo_offset, other_settings=options.other_settings, ppm=options.ppm, rigctl_port=options.rigctl_port, rx_freq=options.rx_freq, samp_rate_rx=options.samp_rate_rx, soapy_rx_device=options.soapy_rx_device, stream_args=options.stream_args, sync=options.sync, tune_args=options.tune_args, udp_IP=options.udp_IP, udp_dump_host=options.udp_dump_host, udp_dump_port=options.udp_dump_port, udp_port=options.udp_port, waterfall_file_path=options.waterfall_file_path)
def sig_handler(sig=None, frame=None):
tb.stop()
tb.wait()
sys.exit(0)
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
tb.start()
tb.wait()
if __name__ == '__main__':
main()
|
py | 1a52375970df403d4d0a9666bf106bcd16a94074 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Entry point of the app module.
To run the app locally, run "python3 -m app" in the
progress-dashboard-rest directory
"""
from app import main
main.main()
|
py | 1a52377ef27226e0a5fe92663924401abf5db01c | from functools import update_wrapper
from string import split
import re
from collections import defaultdict
# a simple PEG parser taken from Peter Norvig's "Design of Computer Programs" course at udacity and slightly modified
def make_grammar(description, whitespace=r'\s*'):
"""Convert a description to a grammar. Each line is a rule for a
non-terminal symbol; it looks like this:
Symbol => A1 A2 ... | B1 B2 ... | C1 C2 ...
where the right-hand side is one or more alternatives, separated by
the '|' sign. Each alternative is a sequence of atoms, separated by
spaces. An atom is either a symbol on some left-hand side, or it is
a regular expression that will be passed to re.match to match a token.
Notation for *, +, or ? not allowed in a rule alternative (but ok
within a token). Use '\' to continue long lines. You must include spaces
or tabs around '=>' and '|'. That's within the grammar description itself.
The grammar that gets defined allows whitespace between tokens by default;
specify '' as the second argument to grammar() to disallow this (or supply
any regular expression to describe allowable whitespace between tokens)."""
G = {' ': whitespace}
description = description.replace('\t', ' ') # no tabs!
for line in split(description, '\n'):
line = line.strip()
if not line: continue
lhs, rhs = split(line, ' => ', 1)
alternatives = split(rhs, ' | ')
assert lhs not in G
G[lhs] = tuple(map(split, alternatives))
return G
def decorator(d):
def _d(fn):
return update_wrapper(d(fn), fn)
update_wrapper(_d, d)
return _d
@decorator
def memo(f):
cache = {}
def _f(*args):
try:
return cache[args]
except KeyError:
cache[args] = result = f(*args)
return result
except TypeError:
# some element of args can't be a dict key
            return f(*args)
return _f
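# Illustrative usage (not part of the original course code): memoising a
# recursive function so repeated sub-calls are answered from the cache.
@memo
def _fib_example(n):
    return n if n < 2 else _fib_example(n - 1) + _fib_example(n - 2)
# _fib_example(30) -> 832040, each sub-result computed only once.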
Fail = (None, None)
def parse(start_symbol, grammar, text):
"""Example call: parse('Exp', G, '3*x + b').
Returns a (tree, remainder) pair. If remainder is '', it parsed the whole
string. Failure iff remainder is None. This is a deterministic PEG parser,
so rule order (left-to-right) matters. Do 'E => T op E | T', putting the
longest parse first; don't do 'E => T | T op E'
Also, no left recursion allowed: don't do 'E => E op T'"""
tokenizer = grammar[' '] + '(%s)'
def parse_sequence(sequence, text):
result = []
for atom in sequence:
tree, text = parse_atom(atom, text)
if text is None: return Fail
result.append(tree)
return result, text
@memo
def parse_atom(atom, text):
if atom in grammar: # Non-Terminal: tuple of alternatives
for alternative in grammar[atom]:
tree, rem = parse_sequence(alternative, text)
if rem is not None: return [atom]+tree, rem
return Fail
else: # Terminal: match characters against start of text
m = re.match(tokenizer % atom, text)
return Fail if (not m) else (m.group(1), text[m.end():])
# Body of parse:
return parse_atom(start_symbol, text)
def simplify_parse(parse, symbols):
'''Walk the parse tree in order and remove nodes not in "symbols" by adding their children to the parent node.
Returns a list of nodes, because what if we removed the top-level node?
This is a monad, btw. Like, for reals.
'''
def rec(parse):
if isinstance(parse, basestring):
yield parse
elif parse[0] in symbols:
result = [parse[0]]
for it in parse[1:]:
result.extend(rec(it))
yield result
else:
for it in parse[1:]:
for it in rec(it):
yield it
return list(rec(parse))
def filter_terminals(lst):
return [it for it in lst if not isinstance(it, basestring)]
def main():
g = make_grammar('''
labelled_instruction => label : instruction | label : | instruction
instruction => op arglist | op
arglist => arg argsep arglist | arg
op => [a-zA-Z]+
arg => [a-zA-Z0-9\[\]]+
label => [_a-zA-Z][_a-zA-Z0-9]*
argsep => \s+
''')
print parse('labelled_instruction', g, 'asdf: MUL [A] 1 3')
print parse('labelled_instruction', g, 'asdf:')
print parse('labelled_instruction', g, 'INC')
p, _ = parse('labelled_instruction', g, 'asdf: MUL [A] 1 3')
print filter_terminals(simplify_parse(p, ('label', 'op', 'arg')))
if __name__ == '__main__':
main() |
py | 1a5238de3315a500bc94e3d3512b1b542411e207 | #!/usr/bin/pyth2.7
import libiopc_rest as rst
def func_add_img(hostname, options):
payload = '{'
payload += '"ops":"add_qemu_img",'
payload += '"format":"qcow2",'
payload += '"disk_path":"/hdd/data/99_Misc/VMs/sys005.qcow2",'
payload += '"size":30,'
#payload += '"disk_path":"/hdd/data/00_Daily/Data002.qcow2",'
#payload += '"size":200,'
payload += '"size_unit":"G",'
payload += '}'
return rst.http_post_ops_by_pyaload(hostname, payload)
def _start_qemu(idx):
payload = '{'
payload += '"ops":"start_qemu",'
payload += '"qemu_index":%d' % idx
payload += '}'
return payload
def _gencfg_qemu(idx):
payload = '{'
payload += '"ops":"gen_cfg_qemu",'
payload += '"qemu_index":%d' % idx
payload += '}'
return payload
def func_gen_cfg1(hostname, options):
payload = _gencfg_qemu(0)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_start_qemu1(hostname, options):
payload = _start_qemu(0)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_gen_cfg2(hostname, options):
payload = _gencfg_qemu(1)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_start_qemu2(hostname, options):
payload = _start_qemu(1)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_gen_cfg3(hostname, options):
payload = _gencfg_qemu(2)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_start_qemu3(hostname, options):
payload = _start_qemu(2)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_gen_cfg4(hostname, options):
payload = _gencfg_qemu(3)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_start_qemu4(hostname, options):
payload = _start_qemu(3)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_gen_cfg5(hostname, options):
payload = _gencfg_qemu(4)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_start_qemu5(hostname, options):
payload = _start_qemu(4)
return rst.http_post_ops_by_pyaload(hostname, payload)
|
py | 1a5238f0a9780133ce0ddf93aa8b917022c7c694 | import collections
import functools
import glob
import ntpath
import os
import random
import re
import subprocess
import sys
import tempfile
from collections import Counter
from pathlib import Path
from urllib.request import Request, urlopen
import fastnumbers
import humanize
import numpy as np
import pandas as pd
import six
from fastnumbers import isint, isfloat
from string_grouper import match_strings
from optimus import ROOT_DIR
from optimus.engines import functions as F # Used in eval
from optimus.helpers.check import is_url
from optimus.helpers.columns import parse_columns
from optimus.helpers.converter import any_dataframe_to_pandas
from optimus.helpers.core import val_to_list, one_list_to_val
from optimus.helpers.logger import logger
from optimus.helpers.raiseit import RaiseIt
from optimus.infer import is_
F = F # To do not remove the import accidentally when using pycharm auto clean import feature
def random_int(n=5):
"""
Create a random string of ints
:return:
"""
return str(random.randint(1, 10 ** n))
def collect_as_list(df):
return df.rdd.flatMap(lambda x: x).collect()
def collect_as_dict(df, limit=None):
"""
Return a dict from a Collect result
[(col_name, row_value),(col_name_1, row_value_2),(col_name_3, row_value_3),(col_name_4, row_value_4)]
:return:
"""
dict_result = []
df = any_dataframe_to_pandas(df)
# if there is only an element in the dict just return the value
if len(dict_result) == 1:
dict_result = next(iter(dict_result.values()))
else:
col_names = parse_columns(df, "*")
# Because asDict can return messed columns names we order
for index, row in df.iterrows():
# _row = row.asDict()
r = collections.OrderedDict()
# for col_name, value in row.iteritems():
for col_name in col_names:
r[col_name] = row[col_name]
dict_result.append(r)
return dict_result
# def collect_as_dict(df, limit=None):
# """
# Return a dict from a Collect result
# :param df:
# :return:
# """
# # # Explore this approach seems faster
# # use_unicode = True
# # from pyspark.serializers import UTF8Deserializer
# # from pyspark.rdd import RDD
# # rdd = df._jdf.toJSON()
# # r = RDD(rdd.toJavaRDD(), df._sc, UTF8Deserializer(use_unicode))
# # if limit is None:
# # r.collect()
# # else:
# # r.take(limit)
# # return r
# #
# from optimus.helpers.columns import parse_columns
# dict_result = []
#
# # if there is only an element in the dict just return the value
# if len(dict_result) == 1:
# dict_result = next(iter(dict_result.values()))
# else:
# col_names = parse_columns(df, "*")
#
# # Because asDict can return messed columns names we order
# for row in df.collect():
# _row = row.asDict()
# r = collections.OrderedDict()
# for col in col_names:
# r[col] = _row[col]
# dict_result.append(r)
# return dict_result
def filter_list(val, index=0):
"""
Convert a list to None, int, str or a list filtering a specific index
[] to None
['test'] to test
:param val:
:param index:
:return:
"""
if len(val) == 0:
return None
else:
return one_list_to_val([column[index] for column in val])
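# Illustrative usage (not part of the original module); the expected values
# assume one_list_to_val collapses single-element lists to the bare value.
def _filter_list_example():
    return (filter_list([]),                                  # None
            filter_list([("name", 1), ("age", 2)], index=0),  # ["name", "age"]
            filter_list([("name", 1)], index=1))              # 1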
def absolute_path(files, format="posix"):
"""
    Use the project base folder to construct an absolute path
:param files: path files
:param format: posix or uri
:return:
"""
files = val_to_list(files)
result = None
if format == "uri":
result = [Path(ROOT_DIR + file).as_uri() for file in files]
elif format == "posix":
result = [Path(ROOT_DIR + file).as_posix() for file in files]
else:
RaiseIt.value_error(format, ["posix", "uri"])
result = one_list_to_val(result)
return result
def format_path(path, format="posix"):
"""
    Format a path depending on the operating system
:param path:
:param format:
:return:
"""
if format == "uri":
result = Path(path).as_uri()
elif format == "posix":
result = Path(path).as_posix()
return result
def java_version():
    version = subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT).decode("utf-8")
    pattern = r'"(\d+\.\d+).*"'
print(re.search(pattern, version).groups()[0])
def setup_google_colab():
"""
Check if we are in Google Colab and setup it up
:return:
"""
from optimus.helpers.constants import JAVA_PATH_COLAB
from optimus.engines.spark.constants import SPARK_PATH_COLAB
from optimus.engines.spark.constants import SPARK_URL
from optimus.engines.spark.constants import SPARK_FILE
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
if not os.path.isdir(JAVA_PATH_COLAB) or not os.path.isdir(SPARK_PATH_COLAB):
print("Installing Optimus, Java8 and Spark. It could take 3 min...")
commands = [
"apt-get install openjdk-8-jdk-headless -qq > /dev/null",
"wget -q {SPARK_URL}".format(SPARK_URL=SPARK_URL),
"tar xf {SPARK_FILE}".format(SPARK_FILE=SPARK_FILE)
]
cmd = " && ".join(commands)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
p_stdout = p.stdout.read().decode("ascii")
p_stderr = p.stderr.read().decode("ascii")
print(p_stdout, p_stderr)
else:
print("Settings env vars")
# Always configure the env vars
os.environ["JAVA_HOME"] = JAVA_PATH_COLAB
os.environ["SPARK_HOME"] = SPARK_PATH_COLAB
def is_pyarrow_installed():
"""
Check if pyarrow is installed
:return:
"""
try:
import pyarrow
have_arrow = True
except ImportError:
have_arrow = False
return have_arrow
def check_env_vars(env_vars):
"""
    Check if an environment variable exists
:param env_vars: Environment var name
:return:
"""
for env_var in env_vars:
if env_var in os.environ:
logger.print(env_var + "=" + os.environ.get(env_var))
else:
logger.print(env_var + " is not set")
# Reference https://nvie.com/posts/modifying-deeply-nested-structures/
def ellipsis(data, length=20):
"""
Add a "..." if a string y greater than a specific length
:param data:
:param length: length taking into account to cut the string
:return:
"""
data = str(data)
return (data[:length] + '..') if len(data) > length else data
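# Illustrative usage (not part of the original module): long values are cut
# and suffixed with "..", shorter values pass through unchanged.
def _ellipsis_example():
    return ellipsis("abcdefghij", length=5), ellipsis("abc", length=5)  # ("abcde..", "abc")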
def create_buckets(lower_bound, upper_bound, bins):
"""
Create a dictionary with bins
:param lower_bound: low range
:param upper_bound: high range
:param bins: number of buckets
:return:
"""
range_value = (upper_bound - lower_bound) / bins
low = lower_bound
buckets = []
if bins == 1:
buckets.append({"lower": low, "upper": low + 1, "bucket": 0})
else:
for i in range(0, bins):
high = low + range_value
buckets.append({"lower": low, "upper": high, "bucket": i})
low = high
# Ensure that the upper bound is exactly the higher value.
# Because floating point calculation it can miss the upper bound in the final sum
buckets[bins - 1]["upper"] = upper_bound
return buckets
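# Illustrative usage (not part of the original module): two equal-width
# buckets covering the range [0, 10].
def _create_buckets_example():
    return create_buckets(0, 10, 2)
    # -> [{'lower': 0, 'upper': 5.0, 'bucket': 0},
    #     {'lower': 5.0, 'upper': 10, 'bucket': 1}]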
def deep_sort(obj):
"""
Recursively sort list or dict nested lists
"""
if isinstance(obj, dict):
_sorted = {}
for key in sorted(obj):
_sorted[key] = deep_sort(obj[key])
elif isinstance(obj, list):
new_list = []
for val in obj:
new_list.append(deep_sort(val))
_sorted = sorted(new_list)
else:
_sorted = obj
return _sorted
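# Illustrative usage (not part of the original module): nested lists are
# sorted and dict keys are visited in sorted order.
def _deep_sort_example():
    return deep_sort({"b": [3, 1, 2], "a": {"y": [2, 1]}})
    # -> {'a': {'y': [1, 2]}, 'b': [1, 2, 3]}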
def infer_dataframes_keys(df_left: pd.DataFrame, df_right: pd.DataFrame):
"""
Infer the possible key columns in two data frames
:param df_left:
:param df_right:
:return:
"""
result = []
df_left = df_left.dropna().astype(str)
df_right = df_right.dropna().astype(str)
    # Search column names with *id* substring
def check_ids_columns(_df):
return [x for x in _df.columns if re.search(r"_id| id|id_| id ", x)]
ids_columns_left = check_ids_columns(df_left)
ids_columns_right = check_ids_columns(df_right)
if len(ids_columns_left) == len(ids_columns_right):
for i, j in zip(ids_columns_left, ids_columns_right):
result.append((i, j,))
# Numeric median len
def min_max_len(_df):
df_is_int = _df.applymap(lambda value: fastnumbers.isint(value)).sum()
df_is_int = df_is_int[df_is_int == len(_df)]
int_columns_names = df_is_int.index.values
int_columns_df = _df[int_columns_names]
string_len = int_columns_df.applymap(lambda value: len(value))
return (int_columns_names, string_len.min().values, string_len.max().values)
min_max_df_left = min_max_len(df_left)
min_max_df_right = min_max_len(df_right)
def median_len(arr, idx):
"""
Calculate median len of the columns string
:param arr:
:param idx:
:return:
"""
_min = arr[1][idx]
_max = arr[2][idx]
if _min != _max:
_median = _max - _min
else:
_median = _max
return _median
for i, col_l in enumerate(min_max_df_left[0]):
median_left = median_len(min_max_df_left, i)
for j, col_r in enumerate(min_max_df_right[0]):
median_right = median_len(min_max_df_right, j)
if median_left == median_right:
result.append((col_l, col_r,))
# String Clustering
for col_l in df_left:
for col_r in df_right:
try:
m = match_strings(df_left[col_l], df_right[col_r], min_similarity=0.05)
if len(m) > 0:
result.append((col_l, col_r,))
except ValueError:
pass
# Count tuples
return [(count,) + item for item, count in Counter(result).items()]
def update_dict(d, u):
"""
Update only the given keys
:param d:
:param u:
:return:
"""
# python 3.8+ compatibility
try:
collectionsAbc = collections.abc
except ModuleNotFoundError:
collectionsAbc = collections
for k, v in six.iteritems(u):
dv = d.get(k, {})
if not isinstance(dv, collectionsAbc.Mapping):
d[k] = v
elif isinstance(v, collectionsAbc.Mapping):
d[k] = update_dict(dv, v)
else:
d[k] = v
return d
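# Illustrative usage (not part of the original module): nested mappings are
# merged key by key instead of being replaced wholesale.
def _update_dict_example():
    return update_dict({"a": {"x": 1, "y": 2}, "b": 3}, {"a": {"y": 20}})
    # -> {'a': {'x': 1, 'y': 20}, 'b': 3}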
def reduce_mem_usage(df, categorical=True, categorical_threshold=50, verbose=False):
"""
    Change the columns datatypes to reduce the memory usage. Also identify columns that could be treated as categorical.
:param df:
:param categorical:
:param categorical_threshold:
:param verbose:
:return:
"""
# Reference https://www.kaggle.com/arjanso/reducing-dataframe-memory-size-by-65/notebook
start_mem_usg = df.ext.size()
ints = df.applymap(isint).sum().compute().to_dict()
floats = df.applymap(isfloat).sum().compute().to_dict()
nulls = df.isnull().sum().compute().to_dict()
total_rows = len(df)
columns_dtype = {}
for x, y in ints.items():
if ints[x] == nulls[x]:
dtype = "object"
elif floats[x] == total_rows:
dtype = "numerical"
elif total_rows <= ints[x] + nulls[x]:
dtype = "numerical"
else:
dtype = "object"
columns_dtype[x] = dtype
numerical_int = [col for col, dtype in columns_dtype.items() if dtype == "numerical"]
final = {}
if len(numerical_int) > 0:
min_max = df.cols.range(numerical_int)
import numpy as np
for col_name in min_max.keys():
_min = min_max[col_name]["min"]
_max = min_max[col_name]["max"]
if _min >= 0:
if _max < 255:
final[col_name] = np.uint8
elif _max < 65535:
final[col_name] = np.uint16
elif _max < 4294967295:
final[col_name] = np.uint32
else:
final[col_name] = np.uint64
else:
if _min > np.iinfo(np.int8).min and _max < np.iinfo(np.int8).max:
final[col_name] = np.int8
elif _min > np.iinfo(np.int16).min and _max < np.iinfo(np.int16).max:
final[col_name] = np.int16
elif _min > np.iinfo(np.int32).min and _max < np.iinfo(np.int32).max:
final[col_name] = np.int32
elif _min > np.iinfo(np.int64).min and _max < np.iinfo(np.int64).max:
final[col_name] = np.int64
# print(final[col_name])
object_int = [col for col, dtype in columns_dtype.items() if dtype == "object"]
if len(object_int) > 0:
count_values = df.cols.value_counts(object_int)
# if categorical is True:
# for col_name in object_int:
# if len(count_values[col_name]) <= categorical_threshold:
# final[col_name] = "category"
df = df.astype(final)
mem_usg = df.ext.size()
if verbose is True:
print("Memory usage after optimization:", humanize.naturalsize(start_mem_usg))
print("Memory usage before optimization is: ", humanize.naturalsize(mem_usg))
print(round(100 * mem_usg / start_mem_usg), "% of the initial size")
return df
def downloader(url, file_format):
"""
Send the request to download a file
"""
def write_file(response, file, chunk_size=8192):
"""
Load the data from the http request and save it to disk
:param response: data returned from the server
:param file:
:param chunk_size: size chunk size of the data
:return:
"""
total_size = response.headers['Content-Length'].strip() if 'Content-Length' in response.headers else 100
total_size = int(total_size)
bytes_so_far = 0
while 1:
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not chunk:
break
file.write(chunk)
total_size = bytes_so_far if bytes_so_far > total_size else total_size
return bytes_so_far
# try to infer the file format using the file extension
if file_format is None:
filename, file_format = os.path.splitext(url)
file_format = file_format.replace('.', '')
i = url.rfind('/')
data_name = url[(i + 1):]
headers = {"User-Agent": "Optimus Data Downloader/1.0"}
req = Request(url, None, headers)
logger.print("Downloading %s from %s", data_name, url)
# It seems that avro need a .avro extension file
with tempfile.NamedTemporaryFile(suffix="." + file_format, delete=False) as f:
bytes_downloaded = write_file(urlopen(req), f)
path = f.name
if bytes_downloaded > 0:
logger.print("Downloaded %s bytes", bytes_downloaded)
logger.print("Creating DataFrame for %s. Please wait...", data_name)
return path
@functools.lru_cache(maxsize=128)
def prepare_path(path, file_format=None):
"""d
Helper to return the file to be loaded and the file name.
This will memoise
:param path: Path to the file to be loaded
:param file_format: format file
:return:
"""
r = []
if is_url(path):
file = downloader(path, file_format)
file_name = ntpath.basename(path)
r = [(file, file_name,)]
else:
for file_name in glob.glob(path, recursive=True):
r.append((file_name, ntpath.basename(file_name),))
if len(r) == 0:
raise Exception("File not found")
return r
def set_func(pdf, value, where, output_col, parser, default=None):
"""
Core implementation of the set function
:param pdf:
:param value:
:param where:
:param output_col:
:param parser:
:param default:
:return:
"""
col_names = list(filter(lambda x: x != "__match__", pdf.cols.names()))
profiler_dtype_to_python = {"decimal": "float", "int": "int", "string": "str", "datetime": "datetime",
"bool": "bool", "zip_code": "str"}
df = pdf.cols.cast(col_names, profiler_dtype_to_python[parser])
try:
if where is None:
return eval(value)
else:
# Reference https://stackoverflow.com/questions/33769860/pandas-apply-but-only-for-rows-where-a-condition-is-met
mask = (eval(where))
if (output_col not in pdf.cols.names()) and (default is not None):
pdf[output_col] = pdf[default]
pdf.loc[mask, output_col] = eval(value)
return pdf[output_col]
except (ValueError, TypeError) as e:
logger.print(e)
# raise
return np.nan
def set_function_parser(df, value, where, default=None):
"""
    Infer the data type that must be used to compute the result of the set function
:param df:
:param value:
:param where:
:return:
"""
value = str(value)
where = str(where)
def prepare_columns(cols):
"""
Extract the columns names from the value and where params
:param cols:
:return:
"""
if cols is not None:
r = val_to_list([f_col[1:len(f_col) - 1] for f_col in
re.findall(r"(df\['[A-Za-z0-9_ -]*'\])", cols.replace("\"", "'"))])
result = [re.findall(r"'([^']*)'", i)[0] for i in r]
else:
result = []
return result
if default is None:
default = []
# if default is in
columns = prepare_columns(value) + prepare_columns(where) + val_to_list(default)
columns = list(set(columns))
if columns:
first_columns = columns[0]
column_dtype = df.cols.infer_profiler_dtypes(first_columns)[first_columns]["dtype"]
else:
        if fastnumbers.isint(value):
            column_dtype = "int"
        elif fastnumbers.isfloat(value):
            column_dtype = "decimal"
else:
column_dtype = "string"
# if column_dtype in PROFILER_NUMERIC_DTYPES:
# func = lambda x: fastnumbers.fast_float(x) if x is not None else None
# elif column_dtype in PROFILER_STRING_DTYPES or column_dtype is None:
# func = lambda x: str(x) if not pd.isnull(x) else None
return columns, column_dtype
# value = "dd/MM/yyyy hh:mm:ss-sss MA"
def match_date(value):
"""
    Create a regex from a string with a date format
:param value:
:return:
"""
formats = ["d", "dd", "M", "MM", "yy", "yyyy", "h", "hh", "H", "HH", "kk", "k", "m", "mm", "s", "ss", "sss", "/",
":", "-", " ", "+", "|", "mi"]
formats.sort(key=len, reverse=True)
result = []
start = 0
end = len(value)
found = False
while start < end:
found = False
for f in formats:
if value.startswith(f, start):
start = start + len(f)
result.append(f)
found = True
break
if found is False:
raise ValueError('{} is not a valid date format'.format(value[start]))
exprs = []
for f in result:
# Separators
if f in ["/", ":", "-", " ", "|", "+", " "]:
exprs.append("\\" + f)
# elif f == ":":
# exprs.append("\\:")
# elif f == "-":
# exprs.append("\\-")
# elif f == " ":
# exprs.append(" ")
# elif f == "|":
# exprs.append("\\|")
# elif f == "+":
# exprs.append("\\+")
# Day
# d -> 1 ... 31
# dd -> 01 ... 31
elif f == "d":
exprs.append("(3[01]|[12][0-9]|0?[1-9])")
elif f == "dd":
exprs.append("(3[01]|[12][0-9]|0[1-9])")
# Month
# M -> 1 ... 12
# MM -> 01 ... 12
elif f == "M":
exprs.append("(1[0-2]|0?[1-9])")
elif f == "MM":
exprs.append("(1[0-2]|0[1-9])")
# Year
# yy -> 00 ... 99
# yyyy -> 0000 ... 9999
elif f == "yy":
exprs.append("[0-9]{2}")
elif f == "yyyy":
exprs.append("[0-9]{4}")
# Hours
# h -> 1,2 ... 12
# hh -> 01,02 ... 12
# H -> 0,1 ... 23
# HH -> 00,01 ... 23
# k -> 1,2 ... 24
# kk -> 01,02 ... 24
elif f == "h":
exprs.append("(1[0-2]|0?[1-9])")
elif f == "hh":
exprs.append("(1[0-2]|0[1-9])")
elif f == "H":
exprs.append("(0?[0-9]|1[0-9]|2[0-3]|[0-9])")
elif f == "HH":
exprs.append("(0[0-9]|1[0-9]|2[0-3]|[0-9])")
elif f == "k":
exprs.append("(0?[1-9]|1[0-9]|2[0-4]|[1-9])")
elif f == "kk":
exprs.append("(0[1-9]|1[0-9]|2[0-4])")
# Minutes
# m -> 0 ... 59
# mm -> 00 .. 59
elif f == "m":
exprs.append("[1-5]?[0-9]")
elif f == "mm":
exprs.append("[0-5][0-9]")
# Seconds
# s -> 0 ... 59
# ss -> 00 .. 59
elif f == "s":
exprs.append("[1-5]?[0-9]")
elif f == "ss":
exprs.append("[0-5][0-9]")
# Milliseconds
# sss -> 0 ... 999
elif f == "sss":
exprs.append("[0-9]{3}")
# Extras
# mi -> Meridian indicator (AM am Am) (PM pm Pm) (m M)
elif f == "mi":
exprs.append("([AaPp][Mm]|[Mm]).?")
return "".join(exprs)
# print("^" + match_date(value) + "$")
def ipython_vars(globals_vars, dtype=None):
"""
Return the list of data frames depending on the type
:param globals_vars: globals() from the notebook
:param dtype: 'pandas', 'cudf', 'dask' or 'dask_cudf'
:return:
"""
tmp = globals_vars.copy()
vars = [(k, v, type(v)) for k, v in tmp.items() if
not k.startswith('_') and k != 'tmp' and k != 'In' and k != 'Out' and not hasattr(v, '__call__')]
if dtype == "dask_cudf":
from dask_cudf.core import DataFrame as DaskCUDFDataFrame
_dtype = DaskCUDFDataFrame
elif dtype == "cudf":
from cudf.core import DataFrame as CUDFDataFrame
_dtype = CUDFDataFrame
elif dtype == "dask":
from dask.dataframe.core import DataFrame
_dtype = DataFrame
elif dtype == "pandas":
import pandas as pd
PandasDataFrame = pd.DataFrame
_dtype = PandasDataFrame
return [name for name, instance, aa in vars if is_(instance, _dtype)]
# Taken from https://github.com/Kemaweyan/singleton_decorator/
class _SingletonWrapper:
"""
A singleton wrapper class. Its instances would be created
for each decorated class.
"""
def __init__(self, cls):
self.__wrapped__ = cls
self._instance = None
def __call__(self, *args, **kwargs):
"""Returns a single instance of decorated class"""
if self._instance is None:
self._instance = self.__wrapped__(*args, **kwargs)
return self._instance
def singleton(cls):
"""
    A singleton decorator. Returns a wrapper object. A call on that object
    returns a single instance of the decorated class. Use the __wrapped__
    attribute to access the decorated class directly in unit tests
"""
return _SingletonWrapper(cls)
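# Illustrative usage (not part of the original module): every call to the
# decorated class returns the same shared instance.
@singleton
class _ExampleRegistry:
    def __init__(self):
        self.items = []
# _ExampleRegistry() is _ExampleRegistry() -> True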
|
py | 1a523a2098aae3c8ae5cb0b2796ab227076d7108 | import copy
import json
import os
import platform
from datetime import datetime
from pathlib import Path
from subprocess import (run, CalledProcessError)
TERMINAL = {
'Linux': 'gnome-terminal',
'Windows': 'start powershell -WorkingDirectory',
'Darwin': 'open -n /Applications/Utilities/Terminal.app'
}
def openInExplorer(_path_):
_os_ = platform.system()
try:
if _os_ == 'Windows':
run([os.path.join(os.getenv('WINDIR'), 'explorer.exe'),
'/select', os.path.normpath(_path_)])
elif _os_ == 'Darwin':
run(['open', _path_])
elif _os_ == 'Linux':
run(['xdg-open', _path_])
return 'done'
except CalledProcessError:
return 'failed'
def projects():
"""morphing this into a function means it captures any and all changes to json"""
with open('projects.json') as json_config:
try:
return json.load(json_config)
except:
return {'_init': 'false'}
def loggedIn():
try:
return projects()['logged_in'] != 'false'
except KeyError:
doInit()
return loggedIn()
def dirScan(dir_to_scan):
"""10x faster parsing with ls -R over find!!"""
found_files = [str(f)
for f in Path(dir_to_scan).rglob("*corvid-package.json")]
corvid_files = []
for _file_ in found_files:
if not 'src/corvid-package.json' in _file_:
continue
project_loc = _file_.split('/src/corvid-package.json')[0]
try:
corvid_files.append({
'abs_dir': project_loc,
'slug': project_loc.split('/')[-1],
'last_updated': datetime.now(),
'due_date': 'none',
'favorited': 'false'
})
except IndexError:
print(_file_)
return corvid_files
def subdirs(project_path):
"""this function will open only useful files in project directory in your editor of choice"""
if not project_path.endswith('/'):
project_path += '/'
valid = {
'pages': {'path': 'src/pages', 'type': 'js'},
'backend': {'path': 'src/backend', 'type': 'any'},
'public': {'path': 'src/public', 'type': 'any'},
'lightboxes': {'path': 'src/lightboxes', 'type': 'js'}
}
def search(prop):
_path_ = project_path + valid[prop]['path']
to_open = Path(_path_).rglob("*")
def check(_):
if _.endswith('tsconfig.json') or _.endswith('authorization-config.json'):
return False
if valid[prop]['type'] == 'any':
return True
return valid[prop]['type'] == _.split('.')[-1]
return [json.dumps(f'{_path_}/{_file_}') for _file_ in to_open if check(_file_)]
return " ".join([search(_key_) for _key_ in {**valid}][0]) or None
def qzWrite(new_json):
"""rewrites json file with requested changes"""
new_json['last_updated'] = datetime.now()
with open('projects.json', 'w', encoding='utf-8') as current_json:
json.dump(new_json, current_json,
ensure_ascii=False, indent=2, default=str, sort_keys=True)
return 'done'
def isInt(n_o):
"""parseInt polyfill"""
try:
return int(n_o)
except ValueError:
return False
def isInit():
"""checks json to ensure existing install"""
try:
return projects()['_init'] and projects()['_init'] != 'false'
except KeyError:
doInit()
return True
def getProjects():
"""current projects list"""
try:
return projects()['local_projects']
except KeyError:
doInit()
return getProjects()
def writeProjects(new_projects):
if not isinstance(new_projects, list):
        raise TypeError('Not a valid projects format')
try:
re_write = clonedProjects()
current_projects = getProjects()
to_keep = [p for p in current_projects if p['abs_dir'] in new_projects]
for _p_ in new_projects:
if not _p_ in to_keep:
to_keep.append({
'slug': _p_.split('/')[-1],
'abs_dir': _p_,
'last_updated': datetime.now(),
'favorited': 'false',
'due_date': 'none'
})
re_write['local_projects'] = to_keep
qzWrite(re_write)
return 'done'
except:
        raise Exception('Uncaught error writing projects')
def getDirs():
"""current directories dictionary"""
try:
return projects()['_watched']
except KeyError:
doInit()
return getDirs()
def writeDirs(new_dirs):
if not isinstance(new_dirs, list):
raise "Not a valid dirs format"
try:
re_write = clonedProjects()
for new_dir in new_dirs:
if new_dir == mainDir():
                re_write['_watched']['main'] = new_dir
else:
new_key = new_dir.split('/')[-1]
re_write['_watched'][new_key] = new_dir
        qzWrite(re_write)
        return 'done'
except:
        raise Exception('Uncaught error writing dirs')
def clonedProjects():
"""clones json file for overwriting current"""
return copy.deepcopy(projects())
def mainDir(new_dir=None):
"""sets or gets the main project directory
sets if argument is passed
gets if no argument"""
if not new_dir:
return getDirs()['main']
qz_clone = clonedProjects()
try:
curr_key = [c for c in {**getDirs()} if c == new_dir][0]
curr_path = getDirs()[curr_key]
repl_key = qz_clone['_watched']['main'].split('/')[-1]
qz_clone['_watched'][repl_key] = qz_clone['_watched']['main']
del qz_clone['_watched'][curr_key]
qz_clone['_watched']['main'] = curr_path
except KeyError:
        raise Exception('Replacement failed')
qzWrite(qz_clone)
return 'done'
def doInit(refresh=False):
"""IMPORTANT: Changes made here must be made to rest of this script."""
usr_home = str(Path.home())
init_project = {
'_init': 'yes',
'_os': platform.system(),
'_watched': {'main': usr_home},
'last_updated': datetime.now(),
'local_projects': dirScan(usr_home),
}
if not refresh:
init_project['logged_in'] = 'false'
init_project['_created'] = datetime.now()
init_project['_config'] = {
'update_on_start': 'false',
'terminal': TERMINAL[platform.system()],
'text_editor': 'none',
'font': {'size': 'none', 'family': 'none'},
'highlight_color': 'none'
}
else:
init_project['logged_in'] = projects()['logged_in']
init_project['_created'] = projects()['_created']
init_project['_config'] = projects()['_config']
qzWrite(init_project)
return 'done'
def getByContext(context):
"""gets project by name or index and returns its full location path"""
if context == '0':
return getProjects()[0]['abs_dir']
if isInt(context):
return getProjects()[int(context)]['abs_dir']
if not '/' in context:
closest_match = ''
for _ix_, _item_ in enumerate(getProjects()):
if _item_['slug'] == context:
return _item_['abs_dir']
if _item_['slug'] in context:
closest_match = _item_['abs_dir']
if _ix_ == len(getProjects()) - 1 and closest_match != '':
return closest_match
else:
return [_path_['abs_dir'] for _path_ in getProjects() if context in _path_['abs_dir']][0]
return False
def withConfig(config=None):
"""sets or gets the config object"""
if isInit():
qz_clone = clonedProjects()
if not config:
return projects()['_config']
if isinstance(config, str):
try:
return projects()['_config'][config]
except:
                raise KeyError('main.py: Not a valid key in config')
if isinstance(config, dict):
for _key_ in {**config}:
qz_clone['_config'][_key_] = config[_key_]
qzWrite(qz_clone)
return qz_clone['_config']
        raise TypeError(f'main.py: {config} not a valid parameter for config method')
else:
doInit()
return withConfig(config)
def create(_dir_, _url_, do_debug=False):
"""
clones a new project
    if do_debug is True, the corvid-debug CLI is used instead of corvid
"""
filtered = [o for o in getProjects() if o['abs_dir'] == _dir_]
if len(filtered) > 0:
        raise Exception('Project already exists!')
try:
if (os.path.isdir(_dir_)):
            raise Exception('A project in that directory already exists!')
Path(_dir_).mkdir(parents=True, exist_ok=True)
if do_debug:
do_exec = 'corvid-debug'
else:
do_exec = 'corvid'
args = ['npx', do_exec, 'clone', _url_]
npm_init = run(['npm', 'init', '-y'], cwd=_dir_)
npm_init.check_returncode()
if npm_init.stderr:
print(npm_init.stderr)
else:
print(npm_init.stdout)
npx_downloading = run(args, cwd=_dir_)
npx_downloading.check_returncode()
if npx_downloading.stderr:
print(npx_downloading.stderr)
raise f"""main.py: Error creating {_dir_}
{npx_downloading.stderr}"""
if npx_downloading.stdout:
print(npx_downloading.stdout)
return 'done'
return 'Invalid params'
    except CalledProcessError as err:
        raise Exception(f"main.py: failed to create {_dir_}: {err}")
def openByContext(_id_, do_debug=False, text_editor=False):
"""opens project by index or name"""
try:
curr_f = getByContext(_id_)
if text_editor:
            usr_editor = withConfig('text_editor') or 'atom'
found_files = subdirs(curr_f)
if found_files:
project_files = [usr_editor, *found_files]
else:
project_files = [usr_editor]
text_editor = run(project_files)
text_editor.check_returncode()
debug_state = 'corvid'
if do_debug:
debug_state += '-debug'
local_editor = run(
['npx', debug_state, 'open-editor'], cwd=curr_f)
local_editor.check_returncode()
return 'opening'
    except CalledProcessError as err:
        raise Exception(f"main.py: Error opening {_id_}: {err}")
def openInTerminal(_id_):
"""opens project directory in terminal emulator"""
try:
target_dir = getByContext(_id_)
try:
usr_emulator = withConfig('terminal')
except KeyError:
usr_emulator = TERMINAL[platform.system()]
opening_terminal = run([usr_emulator], cwd=target_dir)
opening_terminal.check_returncode()
return True
    except CalledProcessError as err:
        raise Exception(f"main.py: Error opening {_id_}: {err}")
def appendProject(_id_):
"""writes an existing project to watch list --- does not clone"""
qz_clone = clonedProjects()
if getByContext(_id_.split('/')[-1]):
return print('Project already exists!')
try:
new_project = {
'abs_dir': _id_,
'slug': _id_.split('/')[-1],
'last_updated': datetime.now(),
'due_date': 'none',
'favorited': 'false'
}
qz_clone['local_projects'].append(new_project)
qzWrite(qz_clone)
return 'done!'
except:
        raise Exception(f'main.py: Error while appending {_id_}')
def deleteProject(_id_):
"""deletes a watched project's entry in the [projects] array"""
qz_clone = clonedProjects()
to_delete = getByContext(_id_)
to_write = []
for _item_ in qz_clone['local_projects']:
if _item_['abs_dir'] != to_delete:
to_write.append(_item_)
qz_clone['local_projects'] = to_write
qzWrite(qz_clone)
return 'done'
def getSnapshots(_id_):
"""returns an array of snapshot dirnames for given project"""
curr_f = getByContext(_id_) + '/.corvid/snapshots'
if not os.path.isdir(curr_f):
        raise Exception(f'main.py: {_id_} has no snapshots yet!')
return [f for f in Path(curr_f).glob("*") if os.path.isdir(f)]
def toggleFavorite(_id_):
"""ability to tag projects as starred"""
qz_clone = clonedProjects()
focused_project = [px for px in getProjects(
) if px['abs_dir'] == getByContext(_id_)][0]
focused_index = qz_clone['local_projects'].index(focused_project)
is_favorited = focused_project['favorited']
if is_favorited == 'true':
qz_clone['local_projects'][focused_index]['favorited'] = 'false'
else:
qz_clone['local_projects'][focused_index]['favorited'] = 'true'
qzWrite(qz_clone)
return 'done'
def setDeadline(_id_, date_set):
"""adds or sets a project deadline"""
qz_clone = clonedProjects()
focused_project = [px for px in getProjects(
) if px['abs_dir'] == getByContext(_id_)][0]
to_set = qz_clone['local_projects'].index(focused_project)
if isinstance(date_set, str):
qz_clone['local_projects'][to_set]['due_date'] = date_set
qzWrite(qz_clone)
return 'done'
    raise TypeError('main.py: Not a valid date object')
def loginHandler():
qz_clone = clonedProjects()
try:
login_attempt = run(["npx", "corvid", "login"], capture_output=True)
        if login_attempt.returncode == 0:
qz_clone['logged_in'] = 'true'
else:
qz_clone['logged_in'] = 'false'
except CalledProcessError:
qz_clone['logged_in'] = 'false'
finally:
qzWrite(qz_clone)
def logoutHandler():
try:
qz_clone = clonedProjects()
logout_attempt = run(["npx", "corvid", "logout"], capture_output=True)
        if logout_attempt.returncode == 0:
qz_clone['logged_in'] = 'false'
else:
qz_clone['logged_in'] = 'true'
qzWrite(qz_clone)
except CalledProcessError:
return "logout aborted"
|
py | 1a523bb3d0decb9c4cc6968c9f826408dd330de6 | """
ART Attack Runner
Version: 1.0
Author: Olivier Lemelin
Script that was built in order to automate the execution of ART.
"""
import os
import os.path
import fnmatch
import platform
import re
import subprocess
import sys
import hashlib
import json
import argparse
import yaml
import unidecode
# pylint: disable=line-too-long, invalid-name
TECHNIQUE_DIRECTORY_PATTERN = 'T*'
ATOMICS_DIR_RELATIVE_PATH = os.path.join("..", "..", "..", "atomics")
HASH_DB_RELATIVE_PATH = "techniques_hash.db"
COMMAND_TIMEOUT = 20
##########################################
# Filesystem & Helpers
##########################################
def get_platform():
"""Gets the current platform."""
# We need to handle the platform a bit differently in certain cases.
# Otherwise, we simply return the value that's given here.
plat = platform.system().lower()
if plat == "darwin":
# 'macos' is the term that is being used within the .yaml files.
plat = "macos"
return plat
def get_self_path():
"""Gets the full path to this script's directory."""
return os.path.dirname(os.path.abspath(__file__))
def get_yaml_file_from_dir(path_to_dir):
"""Returns path of the first file that matches "*.yaml" in a directory."""
for entry in os.listdir(path_to_dir):
if fnmatch.fnmatch(entry, '*.yaml'):
# Found the file!
return os.path.join(path_to_dir, entry)
print("No YAML file describing the technique in {}!".format(path_to_dir))
return None
def load_technique(path_to_dir):
"""Loads the YAML content of a technique from its directory. (T*)"""
# Get path to YAML file.
file_entry = get_yaml_file_from_dir(path_to_dir)
# Load and parses its content.
with open(file_entry, 'r', encoding="utf-8") as f:
return yaml.load(unidecode.unidecode(f.read()), Loader=yaml.SafeLoader)
def load_techniques():
"""Loads multiple techniques from the 'atomics' directory."""
# Get path to atomics directory.
atomics_path = os.path.join(get_self_path(),
ATOMICS_DIR_RELATIVE_PATH)
normalized_atomics_path = os.path.normpath(atomics_path)
print("Loading techniques from {}...".format(normalized_atomics_path))
# Create a dict to accept the techniques that will be loaded.
techniques = {}
print("Loading Technique", end="")
# For each tech directory in the main directory.
for atomic_entry in os.listdir(normalized_atomics_path):
# Make sure that it matches the current pattern.
if fnmatch.fnmatch(atomic_entry, TECHNIQUE_DIRECTORY_PATTERN):
print(", {}".format(atomic_entry), end="")
# Get path to tech dir.
path_to_dir = os.path.join(normalized_atomics_path, atomic_entry)
# Load, parse and add to dict.
tech = load_technique(path_to_dir)
techniques[atomic_entry] = tech
# Add path to technique's directory.
techniques[atomic_entry]["path"] = path_to_dir
print(".")
return techniques
def check_dependencies(executor, cwd):
dependencies = "dependencies"
dependencies_executor = "dependency_executor_name"
prereq_command = "prereq_command"
get_prereq_command = "get_prereq_command"
input_arguments = "input_arguments"
# If the executor doesn't have dependencies_executor key it doesn't have dependencies. Skip
    if dependencies_executor not in executor or dependencies not in executor:
print(
"No '{}' or '{}' section found in the yaml file. Skipping dependencies check.".format(dependencies_executor,
dependencies))
return True
launcher = executor[dependencies_executor]
for dep in executor[dependencies]:
args = executor[input_arguments] if input_arguments in executor else {}
final_parameters = set_parameters(args, {})
command = build_command(launcher, dep[prereq_command], final_parameters, cwd)
p = subprocess.Popen(launcher, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=os.environ, cwd=cwd)
p.communicate(bytes(command, "utf-8") + b"\n", timeout=COMMAND_TIMEOUT)
# If the dependencies are not satisfied the command will exit with code 1, 0 otherwise.
if p.returncode != 0:
print("Dependencies not found. Fetching them...")
if get_prereq_command not in dep:
print("Missing {} commands in the yaml file. Can't fetch requirements".format(get_prereq_command))
return False
command = build_command(launcher, dep[get_prereq_command], final_parameters, cwd)
d = subprocess.Popen(launcher, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=os.environ, cwd=cwd)
out, err = d.communicate(bytes(command, "utf-8") + b"\n", timeout=COMMAND_TIMEOUT)
p.terminate()
return True
##########################################
# Executors
##########################################
def is_valid_executor(exe, self_platform):
"""Validates that the executor can be run on the current platform."""
if self_platform not in exe["supported_platforms"]:
return False
# The "manual" executors need to be run by hand, normally.
# This script should not be running them.
if exe["executor"]["name"] == "manual":
return False
return True
def get_valid_executors(tech):
"""From a loaded technique, get all executors appropriate for the current platform."""
return list(filter(lambda x: is_valid_executor(x, get_platform()), tech['atomic_tests']))
def get_executors(tech):
"""From a loaded technique, get all executors."""
return tech['atomic_tests']
def print_input_arguments(executor):
"""Prints out the input arguments of an executor in a human-readable manner."""
if "input_arguments" in executor:
for name, values in executor["input_arguments"].items():
print("{name}: {description} (default: {default})".format(name=name,
description=values["description"],
default=values["default"]))
def print_executor(executor):
"""Prints an executor in a human-readable manner."""
print("\n-----------------------------------------------------------")
print("Name: " + executor["name"].strip())
print("Description: " + executor["description"].strip())
print("Platforms: " + ", ".join(map(lambda x: x.strip(), executor["supported_platforms"])))
print("\nArguments:")
print_input_arguments(executor)
print("\nLauncher: " + executor["executor"]["name"])
print("Command: " + executor["executor"]["command"] + "\n")
def executor_get_input_arguments(input_arguments):
"""Gets the input arguments from the user, displaying a prompt and converting them."""
# Empty dict to hold on the parameters.
parameters = {}
for name, values in input_arguments.items():
# If answer, use that.
answer = input_string("Please provide a parameter for '{name}' (blank for default)".format(name=name))
# If no answer, use the default.
if not answer:
answer = values["default"]
# Cast parameter to string
parameters[name] = str(answer)
return parameters
def print_non_interactive_command_line(technique_name, executor_number, parameters, check_dep, run_cleanup):
"""Prints the comand line to use in order to launch the technique non-interactively."""
flag_dep = ""
flag_cleanup = ""
if check_dep:
flag_dep = "--dependencies"
if run_cleanup:
flag_cleanup = "--cleanup"
print("In order to run this non-interactively:")
print(" Python:")
print(" techniques = runner.AtomicRunner()")
print(
" techniques.execute(\"{name}\", position={pos}, parameters={params}, dependencies={dep}, cleanup={cleanup})".format(
name=technique_name, pos=executor_number, params=parameters, dep=check_dep, cleanup=run_cleanup))
print(" Shell Script:")
print(" python3 runner.py run {name} {pos} --args '{params}' {dep} {cleanup}\n".format(name=technique_name,
pos=executor_number,
params=json.dumps(
parameters),
dep=flag_dep,
cleanup=flag_cleanup))
def interactive_apply_executor(executor, path, technique_name, executor_number):
"""Interactively run a given executor."""
# Prints information about the executor.
print_executor(executor)
# Request if we still want to run this.
if not yes_or_no("Do you want to run this? "):
print("Cancelled.")
return
# Request if we want to check the dependencies before running the executor.
check_dep = yes_or_no("Do you want to check dependencies? ")
# Request if we want to cleanup after the executor completes.
run_cleanup = yes_or_no("Do you want to run the cleanup after the executor completes? ")
# If so, get the input parameters.
if "input_arguments" in executor:
parameters = executor_get_input_arguments(executor["input_arguments"])
else:
parameters = {}
if check_dep:
if not check_dependencies(executor, path):
print("Check dependencies failed. Cancelling...")
return
# Prints the Command line to enter for non-interactive execution.
print_non_interactive_command_line(technique_name, executor_number, parameters, check_dep, run_cleanup)
launcher = convert_launcher(executor["executor"]["name"])
command = executor["executor"]["command"]
built_command = build_command(launcher, command, parameters, path)
# begin execution with the above parameters.
execute_command(launcher, built_command, path)
if run_cleanup:
apply_cleanup(executor, path, parameters)
def get_default_parameters(args):
"""Build a default parameters dictionary from the content of the YAML file."""
return {name: values["default"] for name, values in args.items()}
def set_parameters(executor_input_arguments, given_arguments):
"""Sets the default parameters if no value was given."""
    # Default parameters as described in the executor.
default_parameters = get_default_parameters(executor_input_arguments)
# Merging default parameters with the given parameters, giving precedence
# to the given params.
final_parameters = {**default_parameters, **given_arguments}
# Cast parameters to string
for name, value in final_parameters.items():
final_parameters[name] = str(value)
return final_parameters
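# Illustrative sketch (not part of the original script): how set_parameters()
# merges an executor's defaults with user-supplied values. The argument names
# "remote_url" and "timeout" are invented for this example.
def _example_set_parameters():
    executor_input_arguments = {
        "remote_url": {"description": "URL to fetch", "default": "http://example.com"},
        "timeout": {"description": "Timeout in seconds", "default": 30},
    }
    # Only "timeout" is overridden; "remote_url" falls back to its default,
    # and every final value is cast to a string.
    merged = set_parameters(executor_input_arguments, {"timeout": 5})
    assert merged == {"remote_url": "http://example.com", "timeout": "5"}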
def apply_executor(executor, path, parameters):
"""Non-interactively run a given executor."""
args = executor["input_arguments"] if "input_arguments" in executor else {}
final_parameters = set_parameters(args, parameters)
launcher = convert_launcher(executor["executor"]["name"])
command = executor["executor"]["command"]
built_command = build_command(launcher, command, final_parameters, path)
# begin execution with the above parameters.
output = execute_command(launcher, built_command, path)
return output
def apply_cleanup(executor, path, parameters):
    """Runs the executor's cleanup command, if one is defined."""
    if "cleanup_command" not in executor["executor"] or executor["executor"]["cleanup_command"] is None:
print("No cleanup section found in the yaml file. Skipping...")
return
args = executor["input_arguments"] if "input_arguments" in executor else {}
final_parameters = set_parameters(args, parameters)
launcher = convert_launcher(executor["executor"]["name"])
command = executor["executor"]["cleanup_command"]
built_command = build_command(launcher, command, final_parameters, path)
# begin execution with the above parameters.
execute_command(launcher, built_command, path)
##########################################
# Text Input
##########################################
def yes_or_no(question):
"""Asks a yes or no question, and captures input. Blank input is interpreted as Y."""
reply = str(input(question + ' (Y/n): ')).capitalize().strip()
if reply == "": # pylint: disable=no-else-return
return True
elif reply[0] == 'Y':
return True
elif reply[0] == 'N':
return False
return yes_or_no("Please enter Y or N.")
def input_string(message):
"""Asks a question and captures the string output."""
return str(input(message + ': ')).strip()
def parse_number_input(user_input):
"""Converts a string of space-separated numbers to an array of numbers."""
lst_str = user_input.strip().split(' ')
return list(map(int, lst_str))
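# Illustrative sketch (not part of the original script): parse_number_input()
# turns the space-separated string typed at the executor prompt into a list of
# integers, so "0 2 3" selects the executors at positions 0, 2 and 3.
def _example_parse_number_input():
    assert parse_number_input(" 0 2 3 ") == [0, 2, 3]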
##########################################
# Commands
##########################################
class ManualExecutorException(Exception):
"""Custom Exception that we trigger triggered when we encounter manual executors."""
pass
def convert_launcher(launcher):
"""Takes the YAML launcher, and outputs an appropriate executable
to run the command."""
plat = get_platform()
# Regular command prompt.
if launcher == "command_prompt": # pylint: disable=no-else-return
if plat == "windows": # pylint: disable=no-else-return
            # This is actually a 64-bit CMD.EXE. Do not change this to a 32-bit CMD.EXE.
return "C:\\Windows\\System32\\cmd.exe"
elif plat == "linux":
# Good ol' Bourne Shell.
return "/bin/sh"
elif plat == "macos":
# I assume /bin/sh is available on OSX.
return "/bin/sh"
else:
# We hit a non-Linux, non-Windows OS. Use sh.
print("Warning: Unsupported platform {}! Using /bin/sh.".format(plat))
return "/bin/sh"
elif launcher == "powershell":
return "C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe"
elif launcher == "sh":
return "/bin/sh"
elif launcher == "bash":
return "/bin/bash"
elif launcher == "manual":
# We cannot process manual execution with this script. Raise an exception.
raise ManualExecutorException()
else:
# This launcher is not known. Returning it directly.
print("Warning: Launcher '{}' has no specific case! Invoking as is.".format(launcher))
return launcher
def build_command(launcher, command, parameters, path): # pylint: disable=unused-argument
"""Builds the command line that will eventually be run."""
    # Using a closure: replacer() maps each matched placeholder name to the
    # corresponding value passed in via the parameters dict.
def replacer(matchobj):
if matchobj.group(1) in parameters:
val = parameters[matchobj.group(1)]
else:
print("Warning: no match found while building the replacement string.")
val = None
return val
# Fix string interpolation (from ruby to Python!) -- ${}
command = re.sub(r"\$\{(.+?)\}", replacer, command)
# Fix string interpolation (from ruby to Python!) -- #{}
command = re.sub(r"\#\{(.+?)\}", replacer, command)
# Replace instances of PathToAtomicsFolder
atomics = os.path.join(path, "..")
command = command.replace("$PathToAtomicsFolder", atomics)
command = command.replace("PathToAtomicsFolder", atomics)
return command
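# Illustrative sketch (not part of the original script): how build_command()
# interpolates Ruby-style #{...} placeholders and the PathToAtomicsFolder
# token. The parameter name "output_file" and the paths are invented.
def _example_build_command():
    command = "type #{output_file} > $PathToAtomicsFolder\\out.txt"
    built = build_command("C:\\Windows\\System32\\cmd.exe", command,
                          {"output_file": "C:\\temp\\art.txt"},
                          "C:\\AtomicRedTeam\\atomics\\T1000")
    # "#{output_file}" is replaced by the parameter value, and
    # "$PathToAtomicsFolder" by os.path.join(path, "..").
    return built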
def execute_subprocess(launcher, command, cwd):
p = subprocess.Popen(launcher, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=os.environ, cwd=cwd)
try:
outs, errs = p.communicate(bytes(command, "utf-8") + b"\n", timeout=COMMAND_TIMEOUT)
return outs, errs
except subprocess.TimeoutExpired as e:
# Display output if it exists.
if e.output:
print(e.output)
if e.stdout:
print(e.stdout)
if e.stderr:
print(e.stderr)
print("Command timed out!")
# Kill the process.
p.kill()
return "", ""
def print_process_output(outs, errs):
def clean_output(s):
# Remove Windows CLI garbage
s = re.sub(r"Microsoft\ Windows\ \[version .+\]\r?\nCopyright.*(\r?\n)+[A-Z]\:.+?\>", "", s)
return re.sub(r"(\r?\n)*[A-Z]\:.+?\>", "", s)
# Output the appropriate outputs if they exist.
if outs:
print("Output: {}".format(clean_output(outs.decode("utf-8", "ignore"))), flush=True)
else:
print("(No output)")
if errs:
print("Errors: {}".format(clean_output(errs.decode("utf-8", "ignore"))), flush=True)
def execute_command(launcher, command, cwd):
"""Executes a command with the given launcher."""
print("\n------------------------------------------------")
# If launcher is powershell we execute all commands under a single process
# powershell.exe -Command - (Tell powershell to read scripts from stdin)
if "powershell" in launcher:
outs, errs = execute_subprocess([launcher, '-Command', '-'], command, cwd)
print_process_output((command.encode() + b":\n" + outs), errs)
else:
cumulative_out = b""
cumulative_err = b""
for comm in command.split("\n"):
# We skip empty lines. This is due to the split just above.
if comm == "":
continue
            # We actually run the command itself.
outs, errs = execute_subprocess(launcher, comm, cwd)
print_process_output(outs, errs)
if outs is not None:
cumulative_out += b"> " + comm.encode() + b":\n" + outs
if errs is not None:
cumulative_err += errs
continue
outs = cumulative_out
errs = cumulative_err
return [outs, errs]
#########################################
# Hash database
#########################################
def load_hash_db():
"""Loads the hash database from a file, or create the empty file if it did not already exist."""
hash_db_path = os.path.join(get_self_path(), HASH_DB_RELATIVE_PATH)
try:
with open(hash_db_path, 'r') as f:
return json.load(f)
except json.JSONDecodeError:
print("Could not decode the JSON Hash DB! Please fix the syntax of the file.")
sys.exit(3)
except IOError:
print("File did not exist. Created a new empty Hash DB.")
empty_db = {}
write_hash_db(hash_db_path, empty_db)
return empty_db
def write_hash_db(hash_db_path, db):
"""Writes the hash DB dictionary to a file."""
with open(hash_db_path, 'w') as f:
json.dump(db, f, sort_keys=True, indent=4, separators=(',', ': '))
def check_hash_db(hash_db_path, executor_data, technique_name, executor_position):
"""Checks the hash DB for a hash, and verifies that it corresponds to the current executor data's
hash. Adds the hash to the current database if it does not already exist."""
hash_db = load_hash_db()
executor_position = str(executor_position)
# Tries to load the technique section.
    if technique_name not in hash_db:
print("Technique section '{}' did not exist. Creating.".format(technique_name))
# Create section
hash_db[technique_name] = {}
new_hash = hashlib.sha256(json.dumps(executor_data).encode()).hexdigest()
# Tries to load the executor hash.
    if executor_position not in hash_db[technique_name]:
print("Hash was not in DB. Adding.")
# Create the hash, since it does not exist. Return OK.
hash_db[technique_name][executor_position] = new_hash
# Write DB to file.
write_hash_db(hash_db_path, hash_db)
return True
old_hash = hash_db[technique_name][executor_position]
# If a previous hash already exists, compare both hashes.
return old_hash == new_hash
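# Illustrative sketch (not part of the original script): the JSON hash database
# maps technique name and executor position to the SHA-256 fingerprint of that
# executor's definition. The technique name and digest below are invented.
_EXAMPLE_HASH_DB_LAYOUT = {
    "T1033": {
        "0": "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08",
    },
}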
def clear_hash(hash_db_path, technique_to_clear, position_to_clear=-1):
"""Clears a hash from the DB, then saves the DB to a file."""
hash_db = load_hash_db()
if position_to_clear == -1:
# We clear out the whole technique.
del hash_db[technique_to_clear]
else:
# We clear the position.
del hash_db[technique_to_clear][str(position_to_clear)]
print("Hash cleared.")
write_hash_db(hash_db_path, hash_db)
#########################################
# Atomic Runner and Main
#########################################
class AtomicRunner():
"""Class that allows the execution, interactive or not, of the various techniques that are part of ART."""
def __init__(self):
"""Constructor. Ensures that the techniques are loaded before we can run them."""
# Loads techniques.
self.techniques = load_techniques()
def repl(self):
"""Presents a REPL to the user so that they may interactively run certain techniques."""
print("Enter the name of the technique that you would like to execute (eg. T1033). Type 'exit' to quit.")
i = input("> ").strip()
while True:
if i == "exit":
break
else:
if i in self.techniques:
self.interactive_execute(i)
else:
print("Technique '{}' does not exist.".format(i))
i = input("> ").strip()
def execute(self, technique_name, position=0, parameters=None, dependencies=False, cleanup=False):
"""Runs a technique non-interactively."""
parameters = parameters or {}
if technique_name not in self.techniques:
print("No technique {} found. Skipping...".format(technique_name))
return [b'', b'No technique found']
# Gets the tech.
tech = self.techniques[technique_name]
# Gets Executors.
executors = get_valid_executors(tech)
        if position >= len(executors):
print("The position '{}' couldn't be found.".format(position))
print("The teqhnique {} has {} available tests for the current platform. Skipping...".format(technique_name,
len(
executors)))
return [b'', b'Executor not found. Out of bounds?']
print("================================================")
if dependencies:
print("Checking dependencies {}/{}\n".format(technique_name, position))
if not check_dependencies(executors[position], tech["path"]):
return [b'', b'Dependencies not met!']
print("Executing {}/{}\n".format(technique_name, position))
try:
# Get executor at given position.
executor = executors[position]
except IndexError:
print("Out of bounds: this executor is not part of that technique's list!")
return [b'', b'Out of bounds: this executor is not part of that technique\'s list!']
# Make sure that it is compatible.
if not is_valid_executor(executor, get_platform()):
print("Warning: This executor is not compatible with the current platform!")
return [b'', b'Warning: This executor is not compatible with the current platform!']
# Check that hash matches previous executor hash or that this is a new hash.
if not check_hash_db(HASH_DB_RELATIVE_PATH, executor, technique_name, position):
print("Warning: new executor fingerprint does not match the old one! Skipping this execution.")
print(
"To re-enable this test, review this specific executor, test your payload, and clear out this executor's hash from the database.")
print("Run this: python runner.py clearhash {} {}.".format(technique_name, position))
return [b'', b'Warning: new executor fingerprint does not match the old one! Skipping this execution.']
# Launch execution.
try:
response = apply_executor(executor, tech["path"], parameters)
except ManualExecutorException:
print("Cannot launch a technique with a manual executor. Aborting.")
return [b'', b'Cannot launch a technique with a manual executor. Aborting.']
finally:
if cleanup:
print("Running cleanup commands.")
apply_cleanup(executor, tech["path"], parameters)
return response
def interactive_execute(self, technique_name):
"""Interactively execute a single technique."""
# Gets the tech.
tech = self.techniques[technique_name]
# Gets the compatible executors for this current platform.
executors = get_valid_executors(tech)
# If there are none.
if not executors:
print("No valid executors for this platform/technique combination!")
return
# Display technique info
print("\n===========================================================")
print("{} - {}".format(tech["display_name"], tech["attack_technique"]))
# Get number of executors.
nb_executors = len(executors)
if nb_executors > 1:
# Displays all executors with the index (for the number choice).
for idx, executor in enumerate(executors):
# Make it better!
print("{}. ".format(idx))
print_executor(executor)
# Display prompt, and get input as number list.
while True:
user_input = input("Please choose your executors: (space-separated list of numbers): ")
try:
numbers = parse_number_input(user_input)
for i in numbers:
# Interactively apply all chosen executors.
interactive_apply_executor(executors[i], tech["path"], tech["attack_technique"], i)
break
except Exception as e: # pylint: disable=broad-except
print("Could not parse the input. make sure this is a space-separated list of integers.")
print(e)
else:
# We only have one executor in this case.
interactive_apply_executor(executors[0], tech["path"], tech["attack_technique"], 0)
def interactive(args): # pylint: disable=unused-argument
"""Launch the runner in interactive mode."""
runner = AtomicRunner()
runner.repl()
def run(args):
"""Launch the runner in non-interactive mode."""
runner = AtomicRunner()
    runner.execute(args.technique, args.position, json.loads(args.args), args.dependencies, args.cleanup)
def clear(args):
"""Clears a stale hash from the Hash DB."""
clear_hash(HASH_DB_RELATIVE_PATH, args.technique, args.position)
def main():
"""Main function, called every time this script is launched rather than imported."""
parser = argparse.ArgumentParser(description="Allows the automation of tests in the Atomic Red Team repository.")
subparsers = parser.add_subparsers()
parser_int = subparsers.add_parser('interactive', help='Runs the techniques interactively.')
parser_int.set_defaults(func=interactive)
    parser_run = subparsers.add_parser('run', help="Runs a single technique / executor pair once.")
parser_run.add_argument('technique', type=str, help="Technique to run.")
parser_run.add_argument('position', type=int, help="Position of the executor in technique to run.")
parser_run.add_argument("--dependencies", action='store_true',
help="Check for dependencies, in any, and fetch them if necessary.")
parser_run.add_argument("--cleanup", action='store_true',
help="Run cleanup commands, if any, after executor completed.")
parser_run.add_argument('--args', type=str, default="{}",
help="JSON string representing a dictionary of arguments (eg. '{ \"arg1\": \"val1\", \"arg2\": \"val2\" }' )")
parser_run.set_defaults(func=run)
parser_clear = subparsers.add_parser('clearhash',
help="Clears a hash from the database, allowing the technique to be run once again.")
    parser_clear.add_argument('technique', type=str, help="Technique whose hash should be cleared.")
    parser_clear.add_argument('--position', '-p', type=int, default=-1,
                              help="Position of the executor whose hash should be cleared.")
parser_clear.set_defaults(func=clear)
try:
args = parser.parse_args()
args.func(args)
except AttributeError:
parser.print_help()
if __name__ == "__main__":
main()
|
py | 1a523c4b751a173ded24b64004266f2bb02a4492 | from collections import defaultdict
class UnionFind:
def __init__(self, n):
self.size = n
self.parents = [-1] * n
def union(self, x, y):
x = self.find(x)
y = self.find(y)
if x == y: return
if self.parents[x] > self.parents[y]: x, y = y, x
self.parents[x] += self.parents[y]
self.parents[y] = x
return x, y
def find(self, x):
if self.parents[x] < 0: return x
else:
self.parents[x] = self.find(self.parents[x])
return self.parents[x]
def group_size(self, x):
return -self.parents[self.find(x)]
def is_same_group(self, x, y):
return self.find(x) == self.find(y)
def members(self, x):
root_x = self.find(x)
return [i for i in range(self.size) if self.find(i) == root_x]
def roots(self):
return [i for i, x in enumerate(self.parents) if x < 0]
def group_count(self):
return len(self.roots())
def dict(self):
ret = defaultdict(list)
for x in range(self.size):
ret[self.find(x)].append(x)
return ret
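# Illustrative usage sketch (not part of the original solution): a tiny
# union-find session on 5 elements.
def _example_union_find():
    uf = UnionFind(5)
    uf.union(0, 1)
    uf.union(1, 2)
    assert uf.is_same_group(0, 2)
    assert uf.group_size(0) == 3
    assert uf.group_count() == 3  # groups: {0, 1, 2}, {3}, {4}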
n, m, k = map(int, input().split())
friend = [tuple(map(int, input().split())) for _ in range(m)]
enemy = [tuple(map(int, input().split())) for _ in range(k)]
follows = [[] for _ in range(n)]
for a, b in friend:
follows[a - 1].append(b - 1)
follows[b - 1].append(a - 1)
blocks = [[] for _ in range(n)]
for a, b in enemy:
blocks[a - 1].append(b - 1)
blocks[b - 1].append(a - 1)
dsu = UnionFind(n)
ans = [0] * n
for a, b in friend:
dsu.union(a - 1, b - 1)
for i in range(n):
ans_ = dsu.group_size(i) - 1
for j in follows[i]:
ans_ -= int(dsu.is_same_group(i, j))
for j in blocks[i]:
ans_ -= int(dsu.is_same_group(i, j))
ans[i] = ans_
print(*ans) |
py | 1a523c51622420c57ecf2c87563a5dfc3226ae86 | """Add contents_hash
Revision ID: 515f518eff57
Revises: 218fd78e07e8
Create Date: 2017-07-25 15:21:18.613141
"""
# revision identifiers, used by Alembic.
revision = '515f518eff57'
down_revision = '218fd78e07e8'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('RepositoryApps', sa.Column('contents_hash', sa.Unicode(length=255), nullable=True))
op.add_column('RepositoryApps', sa.Column('last_processed_contents_hash', sa.Unicode(length=255), nullable=True))
op.add_column('RepositoryApps', sa.Column('last_processed_downloaded_hash', sa.Unicode(length=255), nullable=True))
op.create_index(u'ix_RepositoryApps_contents_hash', 'RepositoryApps', ['contents_hash'], unique=False)
op.create_index(u'ix_RepositoryApps_last_processed_contents_hash', 'RepositoryApps', ['last_processed_contents_hash'], unique=False)
op.create_index(u'ix_RepositoryApps_last_processed_downloaded_hash', 'RepositoryApps', ['last_processed_downloaded_hash'], unique=False)
op.drop_index(u'ix_RepositoryApps_last_processed_hash', table_name='RepositoryApps')
op.drop_column('RepositoryApps', u'last_processed_hash')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('RepositoryApps', sa.Column(u'last_processed_hash', mysql.VARCHAR(length=255), nullable=True))
op.create_index(u'ix_RepositoryApps_last_processed_hash', 'RepositoryApps', [u'last_processed_hash'], unique=False)
op.drop_index(u'ix_RepositoryApps_last_processed_downloaded_hash', table_name='RepositoryApps')
op.drop_index(u'ix_RepositoryApps_last_processed_contents_hash', table_name='RepositoryApps')
op.drop_index(u'ix_RepositoryApps_contents_hash', table_name='RepositoryApps')
op.drop_column('RepositoryApps', 'last_processed_downloaded_hash')
op.drop_column('RepositoryApps', 'last_processed_contents_hash')
op.drop_column('RepositoryApps', 'contents_hash')
### end Alembic commands ###
|
py | 1a523c59afee8a01d2549e6848bb7048ea08234e | # !/usr/bin/env python
# -*- coding:utf-8 -*-
from collections import namedtuple
from copy import deepcopy
class DynamicObject(object):
"""Dynamic Object"""
def __init__(self, **attr_dict):
self._attr_dict = attr_dict
def __getattr__(self, name):
return self._attr_dict[name] if name in self._attr_dict else None
def __setattr__(self, name, value):
        if name == '_attr_dict':
object.__setattr__(self, name, value)
else:
if value is None:
if name in self._attr_dict:
del self._attr_dict[name]
else:
self._attr_dict[name] = value
def __getitem__(self, key):
return self.__getattr__(key)
def __setitem__(self, key, value):
self.__setattr__(key, value)
def __contains__(self, key):
return key in self._attr_dict
def __str__(self):
_Class = namedtuple(self.__class__.__name__, self._attr_dict.keys())
return str(_Class(*self._attr_dict.values()))
__repr__ = __str__
def __deepcopy__(self, memo):
return DynamicObject(**self._attr_dict)
def to_dict(self):
return deepcopy(self._attr_dict)
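# Illustrative usage sketch (not part of the original module): attributes can be
# accessed either as attributes or as dict-style items, assigning None removes
# an attribute, and unknown attributes resolve to None.
def _example_dynamic_object():
    obj = DynamicObject(name="sensor", value=42)
    assert obj.name == "sensor" and obj["value"] == 42
    obj.value = None
    assert "value" not in obj
    assert obj.missing is None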
|
py | 1a523c7612bf878f9036407c56418a9eb6942df4 | # model settings
model = dict(
type='CenterNet',
pretrained='./pretrain/darknet53.pth',
backbone=dict(
type='DarknetV3',
layers=[1, 2, 8, 8, 4],
inplanes=[3, 32, 64, 128, 256, 512],
planes=[32, 64, 128, 256, 512, 1024],
norm_cfg=dict(type='BN'),
out_indices=(1, 2, 3, 4),
frozen_stages=1,
norm_eval=False),
neck=dict(type='None'),
bbox_head=dict(
type='CXTHead',
inplanes=(128, 256, 512, 1024),
head_conv=128,
wh_conv=64,
use_deconv=False,
norm_after_upsample=False,
hm_head_conv_num=2,
wh_head_conv_num=2,
ct_head_conv_num=1,
fovea_hm=False,
num_classes=81,
use_exp_wh=False,
wh_offset_base=16,
wh_agnostic=True,
shortcut_cfg=(1, 2, 3),
shortcut_attention=(False, False, False),
norm_cfg=dict(type='BN'),
norm_wh=False,
avg_wh_weightv3=False,
hm_center_ratio=0.27,
center_ratio=0.01,
hm_init_value=None,
giou_weight=5.,
merge_weight=1.,
hm_weight=1.,
ct_weight=1.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.01,
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=12,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0003,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[9, 11])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=9)
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'eft53_htct027_whratio001_v1l_3lr_wd3e4_s123_nos_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
py | 1a523cb1d1c076a8cbb5fd64e189ea7f8af79b01 | #!/usr/bin/env python
from multiprocessing import Pool
import numpy as np
import os
import matplotlib.pyplot as plt
from functools import partial
import time
import copy
from scipy.stats import multivariate_normal
from scipy import stats
# from scipy.optimize import root
from scipy.optimize import bisect
from sklearn.gaussian_process.kernels import RBF, \
Matern
from pyapprox.density import tensor_product_pdf
from pyapprox.gaussian_process import CholeskySampler, AdaptiveGaussianProcess
from pyapprox.low_discrepancy_sequences import transformed_halton_sequence
from pyapprox.utilities import \
compute_f_divergence, pivoted_cholesky_decomposition, \
get_tensor_product_quadrature_rule
from pyapprox.probability_measure_sampling import rejection_sampling
from pyapprox.visualization import get_meshgrid_function_data
import matplotlib as mpl
mpl.rcParams['font.size'] = 16
mpl.rcParams['lines.linewidth'] = 3
mpl.rcParams['text.usetex'] = True # use latex for all text handling
mpl.rcParams['savefig.bbox'] = 'tight'
mpl.rcParams['savefig.format'] = 'pdf' # gives best resolution plots
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['axes.titlesize'] = 20
mpl.rcParams['xtick.labelsize'] = 20
mpl.rcParams['ytick.labelsize'] = 20
mpl.rcParams['legend.fontsize'] = 16
# print mpl.rcParams.keys()
mpl.rcParams['text.latex.preamble'] = \
r'\usepackage{siunitx}\usepackage{amsmath}\usepackage{amssymb}'
def rosenbrock_function(x):
assert x.shape[0] == 2
x = 4*x-2
vals = ((1.-x[0, :])**2+100*(x[1, :]-x[0, :]**2)**2)[:, np.newaxis]
# vals = ((1.-x[0,:])**2+1*(x[1,:]-x[0,:]**2)**2)[:,np.newaxis]
return vals
def add_noise(values, noise_level):
noise = np.random.normal(0, noise_level)
return values + noise
class HaltonSampler(object):
def __init__(self, nvars, variables):
self.nvars = nvars
self.variables = variables
if self.variables is not None:
assert self.variables.num_vars() == self.nvars
self.marginal_icdfs = [
v.ppf for v in self.variables.all_variables()]
else:
self.marginal_icdfs = None
self.ntraining_samples = 0
self.training_samples = None
def __call__(self, nsamples):
self.training_samples = transformed_halton_sequence(
self.marginal_icdfs, self.nvars, nsamples)
new_samples = self.training_samples[:, self.ntraining_samples:]
self.ntraining_samples = self.training_samples.shape[1]
return new_samples, 0
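# Illustrative sketch (not part of the original experiment): HaltonSampler is
# incremental, so each call returns only the columns that were not handed out
# before. With the column-wise sample convention used in this class, asking for
# 10 and then 25 samples should yield 10 and then 15 new columns.
def _example_halton_sampler():
    sampler = HaltonSampler(nvars=2, variables=None)
    first, _ = sampler(10)
    second, _ = sampler(25)
    return first.shape, second.shape  # expected: (2, 10) and (2, 15)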
def convergence_study(kernel, function, sampler,
num_vars, generate_samples, num_new_samples,
update_kernel_scale_num_samples,
noise_level=0, return_samples=False,
norm=np.linalg.norm, callback=None, gp_kernel=None):
# dirty hack to include two GP kernel types (for IVAR)
if hasattr(kernel, "__len__"):
        # In this case, kernel is a sequence and we assume it holds two
        # kernels: the sampler kernel and the GP kernel.
sampler_kernel = kernel[1]
kernel = kernel[0]
else:
sampler_kernel = kernel
# Instantiate a Gaussian Process model
if gp_kernel is None:
gp_kernel = kernel
gp = AdaptiveGaussianProcess(
gp_kernel, n_restarts_optimizer=10, alpha=1e-12)
gp.setup(function, sampler)
if hasattr(sampler, "set_gaussian_process"):
sampler.set_gaussian_process(gp)
print('sampler kernel', kernel, 'gp kernel', gp_kernel)
# Mesh the input space for evaluations of the real function,
# the prediction and its MSE
validation_samples = generate_samples()
validation_values = function(validation_samples).squeeze()
num_samples = np.cumsum(num_new_samples)
num_steps = num_new_samples.shape[0]
errors = np.empty(num_steps, dtype=float)
nsamples = np.empty(num_steps, dtype=int)
sample_step = 0
optimizer_step = 0
while sample_step < num_steps:
if hasattr(gp, 'kernel_'):
# if using const * rbf + noise kernel
# kernel.theta = gp.kernel_.k1.k2.theta
# if using const * rbf
# kernel.theta = gp.kernel_.k2.theta
# if using rbf
kernel.theta = gp.kernel_.theta
# Fit to data using Maximum Likelihood Estimation of the parameters
# if True:
if ((optimizer_step >= update_kernel_scale_num_samples.shape[0]) or
(sampler.ntraining_samples <
update_kernel_scale_num_samples[optimizer_step])):
gp.optimizer = None
else:
gp.optimizer = "fmin_l_bfgs_b"
optimizer_step += 1
flag = gp.refine(np.sum(num_new_samples[:sample_step+1]))
# allow points to be added to gp more often than gp is evaluated for
# validation
if sampler.ntraining_samples >= num_samples[sample_step]:
pred_values = gp(validation_samples, return_cov=False).squeeze()
# Compute error
assert pred_values.shape == validation_values.shape
error = norm(pred_values-validation_values)/norm(validation_values)
if callback is not None:
callback(gp)
print(gp.kernel_)
print('N', sampler.ntraining_samples, 'Error', error)
errors[sample_step] = error
nsamples[sample_step] = sampler.ntraining_samples
sample_step += 1
if flag > 0:
errors, nsamples = errors[:sample_step], nsamples[:sample_step]
print('Terminating study. Points are becoming ill conditioned')
break
if return_samples:
return errors, nsamples, sampler.training_samples
return errors, nsamples
def unnormalized_posterior(gp, prior_pdf, samples, temper_param=1):
prior_vals = prior_pdf(samples).squeeze()
gp_vals = gp.predict(samples.T).squeeze()
unnormalized_posterior_vals = prior_vals*np.exp(-gp_vals)**temper_param
return unnormalized_posterior_vals
class BayesianInferenceCholeskySampler(CholeskySampler):
def __init__(self, prior_pdf, num_vars,
num_candidate_samples, variables,
max_num_samples=None, generate_random_samples=None,
temper=True, true_nll=None):
self.prior_pdf = prior_pdf
if not temper:
self.temper_param = 1
else:
self.temper_param = 0
self.true_nll = true_nll
self.gp = None
super().__init__(num_vars, num_candidate_samples, variables,
None, generate_random_samples)
def set_gaussian_process(self, gp):
self.gp = gp
def increment_temper_param(self, num_training_samples):
samples = np.random.uniform(0, 1, (self.nvars, 1000))
density_vals_prev = self.weight_function(samples)
def objective(beta):
new_weight_function = partial(
unnormalized_posterior, self.gp, self.prior_pdf,
temper_param=beta)
density_vals = new_weight_function(samples)
II = np.where(density_vals_prev > 1e-15)[0]
JJ = np.where(density_vals_prev < 1e-15)[0]
assert len(np.where(density_vals[JJ] > 1e-15)[0]) == 0
ratio = np.zeros(samples.shape[1])
ratio[II] = density_vals[II]/density_vals_prev[II]
obj = ratio.std()/ratio.mean()
return obj
print('temper parameter', self.temper_param)
x0 = self.temper_param+1e-4
# result = root(lambda b: objective(b)-1, x0)
# x_opt = result.x
x_opt = bisect(lambda b: objective(b)-1, x0, 1)
self.temper_param = x_opt
def __call__(self, num_samples):
if self.gp is None:
raise ValueError("must call self.set_gaussian_process()")
if self.ntraining_samples > 0 and self.temper_param < 1:
self.increment_temper_param(self.training_samples)
assert self.temper_param <= 1
if self.ntraining_samples == 0:
weight_function = self.prior_pdf
else:
if self.true_nll is not None:
def weight_function(x): return self.prior_pdf(x)*np.exp(
-self.true_nll(x)[:, 0])**self.temper_param
else:
weight_function = partial(
unnormalized_posterior, self.gp, self.prior_pdf,
temper_param=self.temper_param)
self.set_weight_function(weight_function)
samples, flag = super().__call__(num_samples)
return samples, flag
def get_posterior_samples(num_vars, weight_function, nsamples):
x, w = get_tensor_product_quadrature_rule(
200, num_vars, np.polynomial.legendre.leggauss,
transform_samples=lambda x: (x+1)/2,
density_function=lambda x: 0.5*np.ones(x.shape[1]))
vals = weight_function(x)
C = 1/vals.dot(w)
def posterior_density(samples):
return weight_function(samples)*C
def proposal_density(samples):
return np.ones(samples.shape[1])
def generate_uniform_samples(nsamples):
return np.random.uniform(0, 1, (num_vars, nsamples))
def generate_proposal_samples(nsamples):
return np.random.uniform(0, 1, (num_vars, nsamples))
envelope_factor = C*vals.max()*1.1
rosenbrock_samples = rejection_sampling(
posterior_density, proposal_density,
generate_proposal_samples, envelope_factor,
num_vars, nsamples, verbose=True,
batch_size=None)
return rosenbrock_samples
def bayesian_inference_example():
init_scale = 0.1
num_vars = 2
num_candidate_samples = 10000
num_new_samples = np.asarray([20]+[5]*6+[25]*6+[50]*8)
nvalidation_samples = 10000
prior_pdf = partial(
tensor_product_pdf, univariate_pdfs=partial(stats.beta.pdf, a=1, b=1))
misfit_function = rosenbrock_function
def weight_function(samples):
prior_vals = prior_pdf(samples).squeeze()
misfit_vals = misfit_function(samples).squeeze()
vals = np.exp(-misfit_vals)*prior_vals
return vals
# Get validation samples from true posterior using rejection sampling
rosenbrock_samples = get_posterior_samples(
num_vars, weight_function, num_candidate_samples+nvalidation_samples)
def generate_random_samples(nsamples, idx=0):
assert idx+nsamples <= rosenbrock_samples.shape[1]
return rosenbrock_samples[:, idx:idx+nsamples]
generate_validation_samples = partial(
generate_random_samples, nvalidation_samples,
idx=num_candidate_samples)
# Must set variables if not using uniform prior on [0,1]^D
variables = None
def get_filename(method, fixed_scale):
filename = 'bayes-example-%s-d-%d-n-%d.npz' % (
method, num_vars, num_candidate_samples)
if not fixed_scale:
filename = filename[:-4]+'-opt.npz'
return filename
# defining kernel
length_scale = init_scale*np.ones(num_vars, dtype=float)
kernel = RBF(length_scale, (5e-2, 1))
# define quadrature rule to compute f divergence
div_type = 'hellinger'
quad_x, quad_w = get_tensor_product_quadrature_rule(
200, num_vars, np.polynomial.legendre.leggauss, transform_samples=None,
density_function=None)
quad_x = (quad_x+1)/2
quad_rule = quad_x, quad_w
fig, axs = plt.subplots(1, 3, figsize=(3*8, 6), sharey=False)
oracle_cholesky_sampler = CholeskySampler(
num_vars, num_candidate_samples, variables,
generate_random_samples=generate_random_samples)
oracle_cholesky_sampler.set_weight_function(weight_function)
oracle_cholesky_sampler.set_kernel(copy.deepcopy(kernel))
    # To give the prior an unfair (but ultimately useless) advantage,
    # use samples from the posterior as half of the candidates.
prior_cholesky_sampler = CholeskySampler(
num_vars, num_candidate_samples, variables,
generate_random_samples=generate_random_samples)
prior_cholesky_sampler.set_weight_function(prior_pdf)
prior_cholesky_sampler.set_kernel(copy.deepcopy(kernel))
    # This is the one Qian should use. The others are for comparison only.
adaptive_cholesky_sampler = BayesianInferenceCholeskySampler(
prior_pdf, num_vars, num_candidate_samples, variables,
max_num_samples=num_new_samples.sum(),
generate_random_samples=None)
adaptive_cholesky_sampler.set_kernel(copy.deepcopy(kernel))
halton_sampler = HaltonSampler(num_vars, variables)
samplers = [oracle_cholesky_sampler, prior_cholesky_sampler,
adaptive_cholesky_sampler, halton_sampler][1:]
methods = ['Oracle-Weighted-Cholesky-b', 'Prior-Weighted-Cholesky-b',
'Learning-Weighted-Cholesky-b', 'Halton'][1:]
labels = [r'$\mathrm{Oracle\;Weighted\;Cholesky}$',
r'$\mathrm{Prior\;Weighted\;Cholesky}$',
r'$\mathrm{Adapted\;Weighted\;Cholesky}$',
r'$\mathrm{Halton}$'][1:]
fixed_scales = [True, False, False, False][1:]
for sampler, method, fixed_scale in zip(samplers, methods, fixed_scales):
filename = get_filename(method, fixed_scale)
print(filename)
if os.path.exists(filename):
continue
if fixed_scale:
update_kernel_scale_num_samples = np.empty(0)
else:
update_kernel_scale_num_samples = np.cumsum(num_new_samples)
divergences = []
cond_nums = []
temper_params = []
def callback(gp):
approx_density = partial(unnormalized_posterior, gp, prior_pdf)
exact_density = weight_function
error = compute_f_divergence(
approx_density, exact_density, quad_rule, div_type, True)
# print ('divergence',error)
divergences.append(error)
cond_nums.append(np.linalg.cond(gp.L_.dot(gp.L_.T)))
if hasattr(sampler, 'temper_param'):
temper_params.append(sampler.temper_param)
print(temper_params)
errors, nsamples, samples = convergence_study(
kernel, rosenbrock_function, sampler, num_vars,
generate_validation_samples, num_new_samples,
update_kernel_scale_num_samples, callback=callback,
return_samples=True)
np.savez(filename, nsamples=nsamples, errors=errors,
divergences=np.asarray(divergences),
cond_nums=np.asarray(cond_nums), samples=samples,
temper_params=np.asarray(temper_params))
styles = ['-', '--', '--', '--']
# styles = ['k-','r-.','b--','g:']
for method, label, ls, fixed_scale in zip(
methods, labels, styles, fixed_scales):
filename = get_filename(method, fixed_scale)
data = np.load(filename)
nsamples, errors = data['nsamples'], data['errors']
divergences, cond_nums = data['divergences'], data['cond_nums']
axs[0].loglog(nsamples, errors, ls=ls, label=label)
axs[1].loglog(nsamples, divergences, ls=ls, label=label)
axs[2].loglog(nsamples, cond_nums, ls=ls, label=label)
for ii in range(3):
axs[ii].set_xlabel(r'$m$')
axs[ii].set_xlim(10, 1000)
axs[0].set_ylabel(r'$\tilde{\epsilon}_{\omega,2}$', rotation=90)
ylim0 = axs[0].get_ylim()
ylim1 = axs[1].get_ylim()
ylim = [min(ylim0[0], ylim1[0]), max(ylim0[1], ylim1[1])]
axs[0].set_ylim(ylim)
axs[1].set_ylim(ylim)
axs[1].set_ylabel(r'$D_\mathrm{H}$', rotation=90)
axs[2].set_ylabel(r'$\kappa$', rotation=90)
figname = 'bayes_example_comparison_%d.pdf' % num_vars
axs[0].legend()
plt.savefig(figname)
method, fixed_scale = 'Learning-Weighted-Cholesky-b', False
filename = get_filename(method, fixed_scale)
print(filename)
adaptive_cholesky_samples = np.load(filename)['samples']
temper_params = np.load(filename)['temper_params']
nsamples = np.load(filename)['nsamples']
fig, axs = plt.subplots(1, 3, figsize=(3*8, 6))
cnt = 0
# plt.figure(figsize=(8,6))
# plt.semilogy(nsamples,temper_params)
axs[cnt].semilogy(np.arange(1, nsamples.shape[0]),
temper_params[1:], 'k-o')
axs[cnt].set_xlabel(r'$\mathrm{Iteration}$ $j$')
axs[cnt].set_ylabel(r'$\beta_j$')
cnt += 1
for ii in [6, -1]:
beta = temper_params[ii]
nn = nsamples[ii]
        # We should really use the GP approximation of the misfit for
        # visualization here, but the true misfit is close enough.
def weight_function(x): return prior_pdf(x).squeeze()*np.exp(
-misfit_function(x).squeeze())**beta
# plt.figure(figsize=(8,6))
plt_ranges = [0, 1, 0, 1]
X, Y, Z = get_meshgrid_function_data(weight_function, plt_ranges, 30)
pp = axs[cnt].contourf(X, Y, Z,
# levels=np.linspace(Z.min(),Z.max(),20),
levels=np.linspace(0, 1, 20),
cmap=mpl.cm.coolwarm)
axs[cnt].plot(
adaptive_cholesky_samples[0, :nn],
adaptive_cholesky_samples[1, :nn], 'ko')
axs[cnt].set_xlabel(r'$y_1$')
axs[cnt].set_ylabel(r'$y_2$')
cnt += 1
plt.colorbar(pp, ax=axs[cnt-1])
figname = 'bayes-example-temper-params.pdf'
plt.savefig(figname)
if __name__ == '__main__':
try:
import sklearn
    except ImportError:
msg = 'Install sklearn using pip install sklearn'
raise Exception(msg)
bayesian_inference_example()
|
py | 1a523cffa00e030db5ada09e462f348f3b215df7 | """
Django settings for mblog project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '36=mp_fa==@zg9ukbe4z31alcp79q4bn9nz6sj!a+mk_4h5s8*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*',]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'markdown_deux',
'mainsite',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mblog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mblog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-Hant'
TIME_ZONE = 'Asia/Taipei'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
] |
py | 1a523dccb4dea66bd095ebdbf90894fad4b56101 | import os
import numpy as np
import pandas as pd
import shap
from sklearn.model_selection import train_test_split
from .utils.plot import (
plot_results_1x2,
plot_results_2x2,
plot_shap_values,
plot_survival,
plot_sensitivity_specificity_vs_threshold
)
from .utils.preprocess import preprocess
from .utils.report import generate_experiment_report
from .run_experiment import run_experiment
pd.options.mode.chained_assignment = None
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_FPATH = os.path.join(BASE_DIR, "data", "covid19_internacao.csv")
def ventilation_prediction(df_final):
df_final = preprocess(df_final)
# Now we expect to prepare our training pipeline
features_display_names = [
("idade", "Age (years)"),
("seg_normal", "Healthy lungs (%)"),
("taxa_gordura", "Mediastinal fat (%)"),
("sofa_score", "SOFA score"),
("n_comorbidades", "Comorbidities"),
]
features = [
f[0] for f in features_display_names
]
target = "mechanical_ventilation"
    # We select a small subset of features, which may leave duplicate rows
    # in the dataframe, so we drop those duplicates.
df_model = df_final.drop_duplicates(subset=features + ["record_id", target])
# Train, validation and test split is in the patient level
df_split = df_model.groupby("record_id").agg({
"idade": lambda series: series.iloc[0],
"sexo_M": lambda series: series.iloc[0],
"instituicao": lambda series: series.iloc[0],
target: lambda series: series.iloc[0],
"obito": lambda series: series.iloc[0]
}).reset_index()
target_unknown = df_split[df_split[target].isna()].record_id.nunique()
df_split = df_split.dropna(subset=[target])
train_valid_records, test_records = train_test_split(
df_split, test_size=0.2, random_state=0, stratify=df_split[target]
)
assert len(set(train_valid_records.record_id.unique()) & set(test_records.record_id.unique())) == 0
summaries, df_test = run_experiment(df_model, train_valid_records, test_records, features, target)
X_test = df_test[features]
############################## Finished training the models ##############################
save_path_2x2 = os.path.join(BASE_DIR, "desfechos_intermediarios", "ventilation_model_2x2.tiff")
plot_results_2x2(summaries, save_path_2x2, fformat="tiff")
save_path_1x2 = os.path.join(BASE_DIR, "desfechos_intermediarios", "ventilation_model_1x2.tiff")
metrics_summary = plot_results_1x2(summaries, save_path_1x2, fformat="tiff")
save_path_shap = os.path.join(BASE_DIR, "desfechos_intermediarios", "ventilation_shap.tiff")
shap_values_plot = plot_shap_values(X_test, summaries, [f[1] for f in features_display_names], save_path_shap, fformat="tiff")
save_path_sens_spec = os.path.join(BASE_DIR, "desfechos_intermediarios", "ventilation_sens_spec.tiff")
plot_sensitivity_specificity_vs_threshold(summaries, save_path_sens_spec, fformat="tiff")
save_report = os.path.join(BASE_DIR, "desfechos_intermediarios", "ventilation_report.txt")
reports = generate_experiment_report(
"Mechanical Ventilation", target, df_split, df_final, features, metrics_summary,
train_valid_records, test_records, save_report
)
print(reports["stats_report"])
print(reports["missing_values"])
print(reports["metrics"])
############################## Survival analysis ##############################
save_path_survival = os.path.join(BASE_DIR, "desfechos_intermediarios", "ventilation_survival.tiff")
plot_survival(df_test, features, summaries, save_path_survival, fformat="tiff")
def ventilation_prediction_loio(df_final, institution):
""" Leave one institution out """
df_final = preprocess(df_final)
# The same institution might appear with different names, so we make a list with the names
assert isinstance(institution, str) or isinstance(institution, list), "'institution' must be either a string or a list"
if isinstance(institution, str):
institution = [institution]
# Now we expect to prepare our training pipeline
features_display_names = [
("idade", "Age (years)"),
("seg_normal", "Healthy lungs (%)"),
("taxa_gordura", "Mediastinal fat (%)"),
("sofa_score", "SOFA score"),
("n_comorbidades", "Comorbidities"),
]
features = [
f[0] for f in features_display_names
]
target = "mechanical_ventilation"
    # We select a small subset of features, which may leave duplicate rows
    # in the dataframe, so we drop those duplicates.
df_model = df_final.drop_duplicates(subset=features + ["record_id", target])
# Train, validation and test split is in the patient level
df_split = df_model.groupby("record_id").agg({
"idade": lambda series: series.iloc[0],
"sexo_M": lambda series: series.iloc[0],
"instituicao": lambda series: series.iloc[0],
target: lambda series: series.iloc[0],
"obito": lambda series: series.iloc[0]
}).reset_index()
target_unknown = df_split[df_split[target].isna()].record_id.nunique()
df_split = df_split.dropna(subset=[target])
# Leave institution out of the train/validation pipeline
train_valid_records = df_split[~df_split.instituicao.isin(institution)]
test_records = df_split[df_split.instituicao.isin(institution)]
assert len(set(train_valid_records.record_id.unique()) & set(test_records.record_id.unique())) == 0
summaries, df_test = run_experiment(df_model, train_valid_records, test_records, features, target)
X_test = df_test[features]
############################## Finished training the models ##############################
save_path_2x2 = os.path.join(BASE_DIR, "desfechos_intermediarios", "LOIO", f"{institution[0]}_ventilation_model_2x2.tiff")
plot_results_2x2(summaries, save_path_2x2, fformat="tiff")
save_path_1x2 = os.path.join(BASE_DIR, "desfechos_intermediarios", "LOIO", f"{institution[0]}_ventilation_model_1x2.tiff")
metrics_summary = plot_results_1x2(summaries, save_path_1x2, fformat="tiff")
save_path_shap = os.path.join(BASE_DIR, "desfechos_intermediarios", "LOIO", f"{institution[0]}_ventilation_shap.tiff")
shap_values_plot = plot_shap_values(X_test, summaries, [f[1] for f in features_display_names], save_path_shap, fformat="tiff")
save_path_sens_spec = os.path.join(BASE_DIR, "desfechos_intermediarios", "LOIO", f"{institution[0]}_ventilation_sens_spec.tiff")
plot_sensitivity_specificity_vs_threshold(summaries, save_path_sens_spec, fformat="tiff")
save_report = os.path.join(BASE_DIR, "desfechos_intermediarios", "LOIO", f"{institution[0]}_ventilation_report.txt")
reports = generate_experiment_report(
"Mechanical Ventilation", target, df_split, df_final, features, metrics_summary,
train_valid_records, test_records, save_report
)
print(reports["stats_report"])
print(reports["missing_values"])
print(reports["metrics"])
############################## Survival analysis ##############################
# save_path_survival = os.path.join(BASE_DIR, "desfechos_intermediarios", "LOIO", f"{institution[0]}_ventilation_survival.tiff")
# plot_survival(df_test, features, summaries, save_path_survival, fformat="tiff")
if __name__ == "__main__":
df_final = pd.read_csv(DATA_FPATH)
ventilation_prediction(df_final)
|
py | 1a523df296c140169f5f795ccc8c6067f78a3f65 | # Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.benchmark import datagen, algorithms
from cuml.benchmark.bench_helper_funcs import _training_data_to_numpy
from cuml.benchmark.runners import AccuracyComparisonRunner, \
SpeedupComparisonRunner, run_variations
from cuml.common.import_utils import has_umap
from cuml.common.import_utils import has_xgboost
import numpy as np
import cudf
import pytest
from numba import cuda
from sklearn import metrics
import pandas as pd
import time
@pytest.mark.parametrize('dataset', ['blobs', 'regression', 'classification'])
def test_data_generators(dataset):
data = datagen.gen_data(dataset, "numpy", n_samples=100, n_features=10)
assert isinstance(data[0], np.ndarray)
assert data[0].shape[0] == 100
@pytest.mark.parametrize('input_type',
['numpy', 'cudf', 'pandas', 'gpuarray', 'gpuarray-c'])
def test_data_generator_types(input_type):
X, *_ = datagen.gen_data('blobs', input_type, n_samples=100, n_features=10)
if input_type == 'numpy':
assert isinstance(X, np.ndarray)
elif input_type == 'cudf':
assert isinstance(X, cudf.DataFrame)
elif input_type == 'pandas':
assert isinstance(X, pd.DataFrame)
elif input_type == 'gpuarray':
assert cuda.is_cuda_array(X)
elif input_type == 'gpuarray-c':
assert cuda.is_cuda_array(X)
else:
assert False
def test_data_generator_split():
X_train, y_train, X_test, y_test = datagen.gen_data(
'blobs', 'numpy', n_samples=100, n_features=10, test_fraction=0.20
)
assert X_train.shape == (100, 10)
assert X_test.shape == (25, 10)
def test_run_variations():
algo = algorithms.algorithm_by_name("LogisticRegression")
res = run_variations(
[algo],
dataset_name="classification",
bench_rows=[100, 200],
bench_dims=[10, 20],
)
assert res.shape[0] == 4
assert (res.n_samples == 100).sum() == 2
assert (res.n_features == 20).sum() == 2
def test_speedup_runner():
class MockAlgo:
def __init__(self, t):
self.t = t
def fit(self, X, y):
time.sleep(self.t)
return
def predict(self, X):
nr = X.shape[0]
res = np.zeros(nr)
res[0:int(nr / 5.0)] = 1.0
return res
class FastMockAlgo(MockAlgo):
def __init__(self):
MockAlgo.__init__(self, 0.1)
class SlowMockAlgo(MockAlgo):
def __init__(self):
MockAlgo.__init__(self, 2)
pair = algorithms.AlgorithmPair(
SlowMockAlgo,
FastMockAlgo,
shared_args={},
name="Mock",
accuracy_function=metrics.accuracy_score,
)
runner = SpeedupComparisonRunner(
[20], [5], dataset_name='zeros'
)
results = runner.run(pair)[0]
expected_speedup = SlowMockAlgo().t / FastMockAlgo().t
assert results["speedup"] == pytest.approx(expected_speedup, 0.4)
def test_multi_reps():
class CountingAlgo:
tot_reps = 0
def fit(self, X, y):
CountingAlgo.tot_reps += 1
pair = algorithms.AlgorithmPair(
CountingAlgo,
CountingAlgo,
shared_args={},
name="Counting",
)
runner = AccuracyComparisonRunner(
[20], [5], dataset_name='zeros', test_fraction=0.20, n_reps=4
)
runner.run(pair)
# Double the n_reps since it is used in cpu and cuml versions
assert CountingAlgo.tot_reps == 8
def test_accuracy_runner():
# Set up data that should deliver accuracy of 0.20 if all goes right
class MockAlgo:
def fit(self, X, y):
return
def predict(self, X):
nr = X.shape[0]
res = np.zeros(nr)
res[0:int(nr / 5.0)] = 1.0
return res
pair = algorithms.AlgorithmPair(
MockAlgo,
MockAlgo,
shared_args={},
name="Mock",
accuracy_function=metrics.accuracy_score,
)
runner = AccuracyComparisonRunner(
[20], [5], dataset_name='zeros', test_fraction=0.20
)
results = runner.run(pair)[0]
assert results["cuml_acc"] == pytest.approx(0.80)
# Only test a few algorithms (which collectively span several types)
# to reduce runtime burden
@pytest.mark.parametrize('algo_name', ['UMAP-Supervised',
'DBSCAN',
'LogisticRegression',
'ElasticNet',
'FIL'])
def test_real_algos_runner(algo_name):
pair = algorithms.algorithm_by_name(algo_name)
if (algo_name == 'UMAP-Supervised' and not has_umap()) or \
(algo_name == 'FIL' and not has_xgboost()):
pytest.xfail()
runner = AccuracyComparisonRunner(
[20], [5], dataset_name='classification', test_fraction=0.20
)
results = runner.run(pair)[0]
print(results)
assert results["cuml_acc"] is not None
@pytest.mark.parametrize('input_type', ['numpy', 'cudf', 'pandas', 'gpuarray'])
def test_training_data_to_numpy(input_type):
X, y, *_ = datagen.gen_data(
'blobs', input_type, n_samples=100, n_features=10
)
X_np, y_np = _training_data_to_numpy(X, y)
assert isinstance(X_np, np.ndarray)
assert isinstance(y_np, np.ndarray)
|
py | 1a523e44ddd54e2472d9a80ccf6e0b6b789b5852 | paramwise_cfg = dict(
norm_decay_mult=0.0,
bias_decay_mult=0.0,
custom_keys={
'.absolute_pos_embed': dict(decay_mult=0.0),
'.relative_position_bias_table': dict(decay_mult=0.0)
})
# With a batch size of 128 per GPU on 8 GPUs:
# lr = 5e-4 * 128 * 8 / 512 = 0.001
optimizer = dict(
type='AdamW',
lr=5e-4 * 128 * 8 / 512,
weight_decay=0.05,
eps=1e-8,
betas=(0.9, 0.999),
paramwise_cfg=paramwise_cfg)
optimizer_config = dict(grad_clip=dict(max_norm=5.0))
# learning policy
lr_config = dict(
policy='CosineAnnealing',
by_epoch=False,
min_lr_ratio=1e-2,
warmup='linear',
warmup_ratio=1e-3,
warmup_iters=20 * 1252,
warmup_by_epoch=False)
runner = dict(type='EpochBasedRunner', max_epochs=300)
|
py | 1a523fa94248c968dbacf2b0e40db1bda3e99a89 | from django.urls import path, include
from rest_framework import urlpatterns
from rest_framework.routers import DefaultRouter
from recipe import views
router = DefaultRouter()
router.register('tag', views.TagViewSet)
router.register('ingredients', views.IngredientViewSet)
router.register('recipes', views.RecipeViewSet)
app_name = 'recipe'
urlpatterns = [
path('', include(router.urls))
]
|
py | 1a523fda8f91bb4eb01908204d4fd5c4e01467cf | from .store import FixityDocument as Doc
from .store import JSONSchemaCollection
from pprint import pprint
def get_schemas():
"""Get JSON schemas for FixityDocument
Returns:
JSONSchemaCollection: Object and document JSON schema that define the store
"""
schemas = JSONSchemaCollection(dict())
d1 = Doc()
d2 = Doc()
fname1 = d1.get_filename(document=True)
fname2 = d2.get_filename()
document_schema = d1.to_jsonschema(document=True)
object_schema = d2.to_jsonschema(document=False)
schemas[fname1] = document_schema
schemas[fname2] = object_schema
return schemas
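# Illustrative usage sketch (not part of the original module): list the schema
# filenames produced by get_schemas(), reusing the pprint import above.
def _example_print_schema_names():
    schemas = get_schemas()
    pprint(sorted(schemas.keys()))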
|
py | 1a5240881d5eadba62f08b5a724e85931785a4b4 | """
Visualize the notes network of a Zettelkasten.
Each arrow represents a link from one zettel to another. The script assumes
that zettels have filenames of the form "YYYYMMDDHHMM This is a title" and that
links have the form [[YYYYMMDDHHMM]]
"""
import glob
import os.path
import re
from textwrap import fill
PAT_ZK_ID = re.compile(r"^(?P<id>\d+)\s(.*)")
PAT_LINK = re.compile(r"\[\[(\d+)\]\]")
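# Illustrative sketch (not part of the original script): what the two patterns
# capture. The IDs and the note title below are invented.
def _example_patterns():
    assert PAT_ZK_ID.match("20210101120000 My note").group("id") == "20210101120000"
    assert PAT_LINK.findall("see [[20210101120000]] and [[20210202130000]]") == [
        "20210101120000", "20210202130000"]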
def parse_zettels(filepaths):
""" Parse the ID and title from the filename and first line of the file.
Assumes that the filename has the format "This is title" and the first line
of the file is the ID
"""
documents = {}
for filepath in filepaths:
basename = os.path.basename(filepath)
filename, ext = os.path.splitext(basename)
# collect zkn_id
        with open(filepath, encoding="utf-8") as f:
            # read the whole file once
            file_read = f.read()
            # search for the first run of 14 digits (the zettel ID)
            match = re.search(r'\d{14}', file_read)
            if match is None:
                # skip files that do not contain a zettel ID
                continue
            zkn_id = match.group(0)
            # collect the IDs of all [[...]] links
            links = PAT_LINK.findall(file_read)
# now collect text
with open(filepath, encoding='utf-8') as f:
f.readline()
doctext = f.readlines()
# document = dict(id=r.group(1), title=r.group(2), links=links)
# document = dict(id = zkn_id, title = filename, links = links, text = doctext)
# documents.append(document)
documents[zkn_id] = dict(title = filename, links = links, text = doctext)
return documents
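# Illustrative sketch (not part of the original script): the mapping returned by
# parse_zettels() is keyed by the 14-digit ID found inside each file. The
# concrete values below are invented.
_EXAMPLE_PARSED_ZETTELS = {
    "20210101120000": {
        "title": "My first note",        # the filename without its extension
        "links": ["20210202130000"],     # IDs found inside [[...]] links
        "text": ["Body of the note\n"],  # every line after the first one
    },
}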
def create_graph(zettels, include_self_references=True, only_listed=True):
"""
Create of graph of the zettels linking to each other.
Parameters
----------
    zettels : dict
        Mapping of zettel ID to zettel metadata, as returned by parse_zettels.
    include_self_references : bool, optional
        Include links from a zettel back to itself. Defaults to True.
    only_listed : bool, optional
        Only include a node in the graph if its ID belongs to one of the
        source zettels. Default is True.
Returns
-------
graph : cytoscape-compatible set of elements
"""
# Collect IDs from source zettels and from zettels linked
zettel_ids = set()
link_ids = set()
for zettel in zettels:
zettel_ids.add(zettel["id"])
link_ids.update(zettel["links"])
if only_listed:
ids_to_include = zettel_ids
else:
ids_to_include = zettel_ids | link_ids
# for zettel in zettels:
# graph.add_node(zettel["id"], title=zettel["title"])
# for link in zettel["links"]:
# if link not in ids_to_include:
# continue
# if include_self_references or (zettel["id"] != link):
# # Truth table for the inclusion conditional
# # IS_SAME IS_DIFF (Is different ID)
# # INCLUDE T T
# # DON'T INCLUDE F T
# graph.add_edge(zettel["id"], link)
elements = []
for zettel in zettels:
# add node
elements.append({
'data': {'id': zettel['id'], 'label': zettel['title']}
})
# add link_ids
for link in zettel["links"]:
if link not in ids_to_include:
continue
if include_self_references or (zettel["id"] != link):
# Truth table for the inclusion conditional
# IS_SAME IS_DIFF (Is different ID)
# INCLUDE T T
# DON'T INCLUDE F T
elements.append({
'data': {'source': zettel['id'], 'target': link}
})
return elements
def list_zettels(notes_dir, pattern="*.md"):
"""
List zettels in a directory.
Parameters
----------
notes_dir : str
Path to the directory containing the zettels.
pattern : str (optional)
Pattern matching zettels. The default is '*.md'. If there are multiple
patterns, separate them with a |, such as in '*.md|*.txt'
"""
filepaths = []
for patt in pattern.split("|"):
filepaths.extend(glob.glob(os.path.join(notes_dir, patt)))
return sorted(filepaths)
def parse_args(args=None):
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument(
"--notes-dir", default=".", help="path to folder containin notes. [.]"
)
parser.add_argument(
"--output",
default="zettel-network",
help="name of output file. [zettel-network]",
)
parser.add_argument(
"--pattern",
action="append",
help=(
"pattern to match notes. You can repeat this argument to"
" match multiple file types. [*.md]"
),
)
parser.add_argument(
"--use-graphviz",
action="store_true",
default=False,
help="Use Graphviz instead of plotly to render the network.",
)
parser.add_argument(
"--no-self-ref",
action="store_false",
default=True,
dest="include_self_references",
help="Do not include self-references in a zettel.",
)
parser.add_argument(
"--only-listed",
action="store_true",
default=False,
help="Only include links to documents that are in the file list",
)
parser.add_argument("zettel_paths", nargs="*", help="zettel file paths.")
args = parser.parse_args(args=args)
# Use the list of files the user specify, otherwise, fall back to
# listing a directory.
if not args.zettel_paths:
if args.pattern is None:
args.pattern = ["*.md"]
patterns = "|".join(args.pattern)
args.zettel_paths = list_zettels(args.notes_dir, pattern=patterns)
return args
def main(args=None):
args = parse_args(args)
zettels = parse_zettels(args.zettel_paths)
# Fail in case we didn't find a zettel
if not zettels:
raise FileNotFoundError("I'm sorry, I couldn't find any files.")
    # create_graph expects a list of dicts with an "id" key, so convert the
    # id-keyed mapping returned by parse_zettels accordingly.
    zettel_list = [dict(id=zkn_id, **doc) for zkn_id, doc in zettels.items()]
    graph = create_graph(
        zettel_list,
        include_self_references=args.include_self_references,
        only_listed=args.only_listed,
    )
if __name__ == "__main__":
import sys
try:
sys.exit(main())
except FileNotFoundError as e:
# Failed either because it didn't find any files or because Graphviz
# wasn't installed
sys.exit(e)
|
py | 1a524093cfce812b0d233200474079a91d1185de | from math import ceil
import pyrebase
import tkinter as tk
import os
import csv
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
from pathlib import Path
# configure firebase and GUI
from matplotlib.patches import Patch
config = {
"apiKey": "your api key",
"authDomain": "your auth domain",
"databaseURL": "None",
"projectId": "your project id",
"storageBucket": "your storage bucket",
"messagingSenderId": "your messagingSenderId":
"appId": "your appId",
"serviceAccount": "your_serviceAccount.json"
}
bar_colors1 = ['#edca82', '#097770', '#e0cdbe', '#a9c0a6', '#ddc3a5', '#e0a96d']
bar_colors_pdr = ['#7a68a0', '#77c593', '#316879', '#f7bdb0', '#7fd7cc', '#989796']
bar_colors_ber = ['#1d3c45', '#d2601a', '#fff1e1', '#aed6dc', '#ff9a8d', '#4a536b']
colors_RSSI_scenario = ['#505160', '#68829e', '#aebd38', '#598234', '#003b46', '#07575b', '#66a5ad']
cr_outer_colors = ['#fff1e1', '#aed6dc', '#ff9a8d', '#4a536b']
cr_inner_colors = ['#1d3c45', '#d2601a']
leg_labels = ['SF 8, BW 125 kHz', 'SF 10, BW 125 kHz', 'SF 12, BW 125 kHz',
'SF 8, BW 250 kHz', 'SF 10, BW 250 kHz', 'SF 12, BW 250 kHz']
HEIGHT = 820
WIDTH = 1300
frame_color = "#ecf241"
root = tk.Tk()
canvas = tk.Canvas(root, height=HEIGHT, width=WIDTH)
canvas.pack()
frame = tk.Frame(root, bg=frame_color, bd=5)
frame.place(relx=0.5, rely=0.02, relwidth=0.85, relheight=0.17, anchor="n")
label = tk.Label(frame, text="Choose experiment", font=("Arial", 20))
label.place(relwidth=1, relheight=0.4)
exp_param_names = ["Experiment Number", "Transmissions per Iteration", "Time between transmissions", # 0,1,2
"Transmission length", # 3
"Random seed for payload generation", "auto-incrementing transmission content", "Description", # 4, 5, 6
"Target distance", # 7
"Height of sender", "Height of receiver", "Environment", "state", "Frequencies", "Bandwidths", # 8, 9, 10, 11, 12, 13
"Spreading factors", # 14
"Coding rates", "Power settings", "Iterations", "Duration", "Start time", "Sender latitude", # 15, 16, 17, 18, 19, 20
"Sender longitude", "Sender altitude", # 21, 22
"Receiver latitude", "Receiver longitude", "Receiver altitude", "Actual distance", # 23, 24, 25, 26
"Altitude difference", # 27
"Temperature", "Humidity", "Pressure", "Clouds", "Sender orientation", # 28, 29, 30, 31, 32
"Receiver orientation" # 33
]
### table configuration
param_selection = [19, 28, 29, 30, 31]
column_labels = []
row_categories = ['Experiment', 'Distance', 'Scenario']
units = ["", "", "s", "", "", "", "", "m", "m", "m", "", "", "", "", "", "", "", "", "s", "", "", "", "m", "", "", "m", "m", "m", "°C", "%", "hPa", "", "", ""]
for i, param in enumerate(exp_param_names):
if i in param_selection:
column_labels.append(param)
modes = ['RSSI/SNR', 'PDR', 'BER', 'RSSI per scenario', 'CR', 'ETX/time-on-air/duty cycle', 'info tables']
import firebase_admin
from firebase_admin import credentials
cred = credentials.Certificate("your_serviceAccount.json")
firebase_admin.initialize_app(cred)
firebase = pyrebase.initialize_app(config)
storage = firebase.storage()
# get all experiment data from firebase if not already downloaded
files = storage.list_files()
for file in files:
exp_num = file.name.split('/')[0][10:]
p_1 = Path('experiment_data')
if not p_1.exists():
print("making experiment_data dir")
os.mkdir("experiment_data")
p_2 = Path('experiment_data/experiment' + exp_num)
if not p_2.exists():
os.mkdir("experiment_data/experiment" + exp_num)
path_info = "experiment" + exp_num + "/experiment_info" + exp_num + ".txt"
storage.child(path_info).download("experiment_data/experiment" + exp_num + "/experiment_info.txt")
path_data = "experiment" + exp_num + "/experiment_data" + exp_num + ".csv"
storage.child(path_data).download("experiment_data/experiment" + exp_num + "/experiment_data.csv")
OPTIONS = []
import glob
for file in (glob.glob("experiment_data/*")):
with open(file + "/experiment_info.txt", 'r') as txt:
lines = txt.readlines()
OPTIONS.append(lines[6][:-1] + ", " + lines[10][:-1] + " : " + file.split('\\')[
1]) # lines[6] contains description, lines[10] environment
OPTIONS = (sorted(OPTIONS))
drop_down_content = tk.StringVar(frame)
drop_down_content.set(OPTIONS[0]) # default value
mode = modes[0]
exp_counter = 0 # used if visualisation takes data from more than one experiment
label_exp_counter = tk.Label(frame, text='--', font=("Arial", 22))
label_exp_counter.place(relwidth=0.14, relheight=0.4, relx=0.86, rely=0.55)
# data arrays and other variables for different plots
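# Each *_arr below holds six sub-lists, one per SF/BW parameter combination
# (cf. leg_labels); within a run they are filled at index iteration - 1.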
pdr_arr = [[], [], [], [], [], []]
pdr_corr_arr = [[], [], [], [], [], []]
pdr_uncorr_arr = [[], [], [], [], [], []]
ber_arr = [[], [], [], [], [], []]
rssi_arr = []
rssi_arr_distances = []
snr_averaging_arr = []
rssi_arr_snr = []
etx_arr = [[], [], [], [], [], []]
info_arrays = []
# process and visualize data for selected experiment
def process_data(selection):
experiment_number = str(selection).split('t')[-1]
#########################################
########### get data ####################
p_1 = Path('experiment_data')
if not p_1.exists():
os.mkdir("experiment_data")
p_2 = Path('experiment_data/experiment' + experiment_number)
if not p_2.exists():
os.mkdir("experiment_data/experiment" + experiment_number)
path_info = "experiment" + experiment_number + "/experiment_info" + experiment_number + ".txt"
storage.child(path_info).download("experiment_data/experiment" + experiment_number + "/experiment_info.txt")
path_data = "experiment" + experiment_number + "/experiment_data" + experiment_number + ".csv"
storage.child(path_data).download("experiment_data/experiment" + experiment_number + "/experiment_data.csv")
overview_right = ""
overview_middle = ""
overview_left = ""
exp_params = {}
exp_params_arr = []
########################################################
########### visualise experiment information ###########
i = 0
with open('experiment_data/experiment' + experiment_number + '/experiment_info.txt', 'r') as file:
for line in file:
exp_params[i] = line.strip('\n')
if i in param_selection:
if i != 19 and i != 28:
exp_params_arr.append(line.strip('\n') + units[i])
else:
if i == 19:
ts = int(line.strip('\n'))
exp_params_arr.append(datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')[11:-3])
else:
exp_params_arr.append(str(round((float(line.strip('\n')) - 273.15), 1)) + units[i])
if i < 11:
overview_left += exp_param_names[i]
overview_left += ": " + line + "\n\n"
elif i < 22:
overview_middle += exp_param_names[i]
overview_middle += ": " + line + "\n\n"
else:
overview_right += exp_param_names[i]
overview_right += ": " + line + "\n\n"
i += 1
frame_overview_left = tk.Frame(root, bg=frame_color, bd=5)
frame_overview_left.place(relx=0.15, rely=0.21, relwidth=0.25, relheight=0.65, anchor="n")
label_descr_integration = tk.Label(frame_overview_left, anchor="n", justify=tk.LEFT, text=overview_left)
label_descr_integration.place(relwidth=1, relheight=1)
frame_overview_middle = tk.Frame(root, bg=frame_color, bd=5)
frame_overview_middle.place(relx=0.5, rely=0.21, relwidth=0.25, relheight=0.65, anchor="n")
label_descr_integration = tk.Label(frame_overview_middle, anchor="n", justify=tk.LEFT, text=overview_middle)
label_descr_integration.place(relwidth=1, relheight=1)
frame_overview_right = tk.Frame(root, bg=frame_color, bd=5)
frame_overview_right.place(relx=0.85, rely=0.21, relwidth=0.25, relheight=0.65, anchor="n")
label_descr_integration = tk.Label(frame_overview_right, anchor="n", justify=tk.LEFT, text=overview_right)
label_descr_integration.place(relwidth=1, relheight=1)
################################################
    ########### extract experiment data ############
total_number_vals = []
identifier_vals = []
rssi_vals = []
snr_vals = []
bit_error_vals = []
    last_row_total_number = 0
with open("experiment_data/experiment" + experiment_number + "/experiment_data.csv", "r") as csv_file:
for row in csv.DictReader((x.replace('\0', '') for x in csv_file), delimiter=','):
total_number_val = row['totalnumber']
if total_number_val == last_row_total_number:
continue
total_number_vals.append(total_number_val)
last_row_total_number = total_number_val
identifier_vals.append(row['identifier'])
rssi_vals.append(int(row['rssi']))
snr_vals.append(int(row['snr']))
bit_error_vals.append(int(row['biterrors']))
#######################################################
############ aggregate results ########################
fr_code = int(exp_params[12])
bw_code = int(exp_params[13])
sf_code = int(exp_params[14])
cr_code = int(exp_params[15])
pw_code = int(exp_params[16])
number_of_transmissions = int(exp_params[1])
packet_length = int(exp_params[3])
print(packet_length)
label = ["", "", "", "", ""]
frequency_labels = ["869.5 MHz", "868.1 MHz", "868.3 MHz", "868.5 MHz", "867.1 MHz", "867.3 MHz", "867.5 MHz",
"867.7 MHz"]
bandwidth_labels = ["125kHz", "250kHz", "500kHz"]
coding_rate_labels = ["4/5", "4/6", "4/7", "4/8"]
iteration = 0
index = 0
rounds = []
rssi_iteration = []
snr_iteration = []
colors = []
global exp_counter
global rssi_arr
global rssi_arr_distances
global rssi_arr_snr
global snr_averaging_arr
distance = int(float(exp_params[26]))
cr_vals = []
cr_corrupted = []
if not rssi_arr_distances or rssi_arr_distances[-1] != distance:
rssi_arr.append([[], [], [], [], [], []])
rssi_arr_distances.append(int(float(exp_params[26])))
print(int(float(exp_params[26])))
if snr_averaging_arr:
rssi_arr_snr.append(np.mean(np.array(snr_averaging_arr))) # append the avg snr for one scenario distance
snr_averaging_arr = []
for i in range(8):
if ((fr_code >> i) & 1) == 1:
label[0] = "Frequency: " + frequency_labels[i]
else:
continue
for j in range(3):
if ((bw_code >> j) & 1) == 1:
label[1] = "Bandwidth: " + bandwidth_labels[j]
else:
continue
for k in range(6):
if ((sf_code >> k) & 1) == 1:
label[2] = "Spreading Factor: " + str(k + 7)
else:
continue
for l in range(4):
if ((cr_code >> l) & 1) == 1:
label[3] = "Coding Rate: " + coding_rate_labels[l]
else:
continue
for m in range(8):
if ((pw_code >> m) & 1) == 1:
label[4] = "Power: " + str(18 - m * 2)
rssi_sum = 0
biterror_sum = 0
corrupted_msgs_count = 0
for n in range(number_of_transmissions):
if index > len(rssi_vals) - 1:
break
if n == int(total_number_vals[index]) - (
iteration * number_of_transmissions): # once the loop is at the right x value
rounds.append(n)
rssi_val = rssi_vals[index]
rssi_iteration.append(rssi_val)
snr_iteration.append(rssi_val - snr_vals[index])
rssi_sum += rssi_val
biterror_sum += bit_error_vals[index]
if 'f' not in identifier_vals[index]:
color = bar_colors1[1]
else:
color = '#f07c3a'
corrupted_msgs_count += 1
index += 1
colors.append(color)
else:
continue
iteration += 1
if (len(rssi_iteration) > 0):
rssi_avg = rssi_sum / len(rssi_iteration)
if corrupted_msgs_count > 0:
bit_err_rate = biterror_sum / (corrupted_msgs_count * packet_length * 8)
else:
bit_err_rate = 0
pdr = len(rssi_iteration) / number_of_transmissions
pdr_corr = corrupted_msgs_count / number_of_transmissions
uncorr_count = (len(rssi_iteration) - corrupted_msgs_count)
pdr_uncorr = uncorr_count / number_of_transmissions
etx = number_of_transmissions / (len(rssi_iteration) - corrupted_msgs_count) if uncorr_count > 0 else 0
else:
rssi_avg = 0
bit_err_rate = 0
pdr = 0
etx = 0
pdr_corr = 0
pdr_uncorr = 0
####################################################################################################
########## mode to illustrate every iterations received messages as RSSI values with noise floor####
if mode == modes[0]:
plt.scatter(rounds, rssi_iteration, color=colors)
plt.plot(rounds, snr_iteration, color=bar_colors1[3], label=('noise floor'));
plt.scatter([], [], color=bar_colors1[1], label='rssi value of correct messages')
plt.scatter([], [], color='#f07c3a', label='rssi value of corrupted messages')
plt.xlabel('message number', fontsize=12)
plt.ylabel('dBm', fontsize=12)
label_str = ""
for p in label:
label_str += p + ", "
label_str += '\naverage RSSI: ' + str(
round(rssi_avg, 2)) + ',\npacket delivery rate: ' + str(round(
pdr, 2)) + '\naverage biterror rate: ' + str(
round(bit_err_rate, 4))
#plt.title(label_str, fontsize=6)
plt.legend(framealpha=0.4, prop={'size': 7})
plt.savefig('plots/below_noise_floor_corr.png', bbox_inches="tight", dpi=300)
plt.show()
#######################################################################################
############ mode to illustrate PDR with error bar over several experiments ###########
########### (preparation while looping through single experiments' iterations #########
if mode == modes[1]:
global pdr_arr
global pdr_corr_arr
global pdr_uncorr_arr
pdr_arr[iteration - 1].append(pdr)
pdr_corr_arr[iteration - 1].append(pdr_corr)
pdr_uncorr_arr[iteration - 1].append(pdr_uncorr)
##########################################################################################
############ mode to illustrate bit error rate with error bar over several experiments ###
########### (preparation while looping through single experiments' iterations ############
if mode == modes[2]:
global ber_arr
ber_arr[iteration - 1].append(bit_err_rate)
##########################################################################################
############ mode to illustrate rssi for several ranges with several LoRa parameter ######
############ combinations with error bar over several experiments for a whole scenario ###
########### (preparation while looping through single experiments' iterations ############
if mode == modes[3]:
if rssi_avg != 0:
rssi_arr[len(rssi_arr_distances) - 1][iteration - 1].append(rssi_avg)
snr_averaging_arr += snr_iteration
##########################################################################################
############ mode to illustrate difference in coding rates for one LoRa parameter #######
############ combination as pie chart ####################################################
if mode == modes[4]:
if len(rssi_iteration) != 0:
cr_vals.append([len(rssi_iteration) - corrupted_msgs_count, corrupted_msgs_count])
cr_corrupted.append('CR 4/' + str(((iteration - 1) % 4) + 5) + "\n" + str(int((corrupted_msgs_count / len(rssi_iteration)) * 100)) + "% corrupted")
if iteration % 4 == 0:
fig2, ax2 = plt.subplots()
size = 0.3
vals = np.array(cr_vals)
if iteration == 16:
ax2.pie(vals.sum(axis=1), radius=1, colors=['#aed6dc'], labels=cr_corrupted,
wedgeprops=dict(width=size, edgecolor='w'))
else:
ax2.pie(vals.sum(axis=1), radius=1, colors=cr_outer_colors, labels=cr_corrupted,
wedgeprops=dict(width=size, edgecolor='w'))
ax2.pie(vals.flatten(), radius=1 - size, colors=cr_inner_colors,
wedgeprops=dict(width=size, edgecolor='w'))
ax2.set(aspect="equal")
legend_elements = [Patch(facecolor=cr_inner_colors[0], label='correct messages'),
Patch(facecolor=cr_inner_colors[1], label='corrupted messages')]
plt.legend(handles=legend_elements, bbox_to_anchor=(1.2, 1.065), loc='upper right')
plt.savefig('plots/test_CR' + str(iteration) + '.png', bbox_inches="tight", dpi=300)
plt.show()
cr_vals = []
cr_corrupted = []
#######################################################################################
############ mode to illustrate ETX with error bar over several experiments ##########
########### (preparation while looping through single experiments' iterations #########
if mode == modes[5]:
global etx_arr
etx_arr[iteration - 1].append(etx)
rounds = []
rssi_iteration = []
snr_iteration = []
colors = []
print(mode)
##############################################################
######### plot for PDR mode ##################################
if mode == modes[1]:
exp_counter += 1
label_exp_counter.config(text='choose\n' + str(exp_counter + 1) + '. experiment', font=("Arial", 12))
if exp_counter == 3:
exp_counter = 0
label_exp_counter.config(text=str(exp_counter))
string_labels = ['8/1', '10/1', '12/1', '8/2', '10/2', '12/2']
leg_labels = ['SF 8, BW 125 kHz', 'SF 10, BW 125 kHz', 'SF 12, BW 125 kHz',
'SF 8, BW 250 kHz', 'SF 10, BW 250 kHz', 'SF 12, BW 250 kHz']
means_pdr = []
stds_pdr = []
means_pdr_corr = []
stds_pdr_corr = []
means_pdr_uncorr = []
stds_pdr_uncorr = []
for arr in pdr_arr:
np_arr = np.array(arr)
means_pdr.append(np.mean(np_arr))
stds_pdr.append(np.std(np_arr))
for arr in pdr_corr_arr:
np_arr = np.array(arr)
means_pdr_corr.append(np.mean(np_arr))
stds_pdr_corr.append(np.std(np_arr))
for arr in pdr_uncorr_arr:
np_arr = np.array(arr)
means_pdr_uncorr.append(np.mean(np_arr))
stds_pdr_uncorr.append(np.std(np_arr))
width = 0.8
######### plot pdr ##########
fig1, ax1 = plt.subplots(figsize=(5, 7))
ax1.bar(string_labels, means_pdr, width, yerr=stds_pdr, color=bar_colors_pdr, capsize=4,
label=leg_labels)
ax1.set_ylim(0, 1.2)
ax1.set_ylabel('PDR', fontsize=20)
ax1.set_xticks(np.arange(len(pdr_arr)))
ax1.tick_params(axis='both', labelsize=16)
ax1.tick_params(direction='in', bottom=True, left=True, right=True, top=True, width=2)
plt.setp(ax1.spines.values(), linewidth=2)
plt.tight_layout()
legend_elements = []
for e in range(6):
legend_elements.append(Patch(facecolor=bar_colors_pdr[e], label=leg_labels[e]))
plt.legend(handles=legend_elements, loc='upper right', framealpha=0.4,
prop={'size': 7})
plt.savefig('plots/sea_pdr1.png', bbox_inches="tight", dpi=400)
plt.show()
########## plot with distinguishing corrupted/uncorrupted messages #########
fig2, ax2 = plt.subplots(figsize=(5, 7))
ax2.bar(string_labels, means_pdr_uncorr, width, yerr=stds_pdr_uncorr, color=bar_colors_pdr, capsize=4, label='uncorrupted')
ax2.bar(string_labels, means_pdr_corr, width, error_kw=dict(ecolor='#a89894', lw=2, capsize=4, capthick=1),
yerr=stds_pdr_corr, bottom=means_pdr_uncorr, color='#fc6265', label='corrupted')
ax2.set_ylim(0, 1.1)
ax2.set_ylabel('PDR', fontsize=20)
ax2.set_xticks(np.arange(len(pdr_arr)))
ax2.tick_params(axis='both', labelsize=16)
ax2.tick_params(direction='in', bottom=True, left=True, right=True, top=True, width=2)
plt.setp(ax2.spines.values(), linewidth=2)
plt.tight_layout()
legend_elements.append(Patch(facecolor='#fc6265', label='corrupted part'))
plt.legend(handles=legend_elements, loc='lower right', framealpha=0.4,
prop={'size': 7})
plt.savefig('plots/2700_cityL_pdr2.png', bbox_inches="tight", dpi=400)
plt.show()
pdr_arr = [[], [], [], [], [], []]
pdr_corr_arr = [[], [], [], [], [], []]
pdr_uncorr_arr = [[], [], [], [], [], []]
##############################################################
######### plot for BER mode ##################################
if mode == modes[2]:
exp_counter += 1
label_exp_counter.config(text='choose\n' + str(exp_counter + 1) + '. experiment', font=("Arial", 12))
if exp_counter == 3:
exp_counter = 0
label_exp_counter.config(text=str(exp_counter))
leg_labels = ['SF 8, BW 125 kHz', 'SF 10, BW 125 kHz', 'SF 12, BW 125 kHz',
'SF 8, BW 250 kHz', 'SF 10, BW 250 kHz', 'SF 12, BW 250 kHz']
fig2, ax2 = plt.subplots(figsize=(5, 7))
location = 0
for arr in ber_arr:
np_arr = np.array(arr)
ax2.bar(location, np.mean(np_arr), yerr=np.std(np_arr), align='center', ecolor='black',
label=leg_labels[location], capsize=7, color=bar_colors_ber[location])
location += 1
ax2.set_yscale('linear')
ax2.set_ylim(0, 0.25)
ax2.set_ylabel('BER', fontsize=20)
ax2.set_xticks(np.arange(len(ber_arr)))
empty_string_labels = ['8/1', '10/1', '12/1', '8/2', '10/2', '12/2']
ax2.set_xticklabels(empty_string_labels)
ax2.tick_params(axis='both', labelsize=16)
ax2.tick_params(direction='in', bottom=True, left=True, right=True, top=True, width=2)
plt.setp(ax2.spines.values(), linewidth=2)
plt.tight_layout()
plt.legend(loc='upper right', framealpha=0.4, prop={'size': 7})
plt.savefig('plots/ber_500_forest.png', bbox_inches="tight", dpi=400)
plt.show()
ber_arr = [[], [], [], [], [], []]
##############################################################
######### plot for RSSI/range mode ##################################
if mode == modes[3]:
exp_counter += 1
label_exp_counter.config(text='choose\n' + str(exp_counter + 1) + '. experiment', font=("Arial", 12))
if exp_counter == 12:
exp_counter = 0
label_exp_counter.config(text=str(exp_counter))
rssi_arr_snr.append(np.mean(np.array(snr_averaging_arr))) # append the avg snr for one scenario distance
snr_averaging_arr = []
fig2 = plt.figure()
x_arr = np.array([200, 400, 600, 800])
# x_arr = np.array(rssi_arr_distances) (instead of above line for other representation)
leg_labels = ['SF 8, BW 125 kHz', 'SF 10, BW 125 kHz', 'SF 12, BW 125 kHz',
'SF 8, BW 250 kHz', 'SF 10, BW 250 kHz', 'SF 12, BW 250 kHz']
zero_size_dev_arrays = []
for graph in range(6):
y_arr = []
y_err_arr = []
for distance in range(len(rssi_arr)):
dev_arr = np.array(rssi_arr[distance][graph])
if dev_arr.size != 0:
y_arr.append(np.mean(dev_arr))
y_err_arr.append(np.std(dev_arr))
else:
zero_size_dev_arrays.append(distance)
y = np.array(y_arr)
yerr_graph = np.array(y_err_arr)
x_arr_sub = np.delete(x_arr, zero_size_dev_arrays)
zero_size_dev_arrays = []
lines = {'linestyle': 'None'} # remove for other representation
plt.rc('lines', **lines) # remove for other representation
plt.errorbar(x_arr_sub + 9*graph, y, yerr=yerr_graph, elinewidth=5, color=colors_RSSI_scenario[graph], label=leg_labels[graph])
# plt.errorbar(x_arr_sub, y, yerr=yerr_graph, color=colors_RSSI_scenario[graph], label=leg_labels[graph]) (instead of above line for other representation)
plt.plot(x_arr + 18, rssi_arr_snr, color=colors_RSSI_scenario[6], label='noise floor', linestyle='--',
marker='o');
plt.xlabel('distance', fontsize=12)
plt.ylabel('dBm', fontsize=12)
plt.legend(loc='lower right', framealpha=0.4, prop={'size': 7})
plt.savefig('plots/test_RSSI_per_scenario_nonelev.png', bbox_inches="tight", dpi=300)
plt.show()
rssi_arr = []
rssi_arr_distances = []
rssi_arr_snr = []
##############################################################
######### plot for ETX mode ##################################
if mode == modes[5]:
exp_counter += 1
label_exp_counter.config(text='choose\n' + str(exp_counter + 1) + '. experiment', font=("Arial", 12))
if exp_counter == 3:
exp_counter = 0
label_exp_counter.config(text=str(exp_counter))
leg_labels = ['SF 8, BW 125 kHz', 'SF 10, BW 125 kHz', 'SF 12, BW 125 kHz',
'SF 8, BW 250 kHz', 'SF 10, BW 250 kHz', 'SF 12, BW 250 kHz']
fig2, ax2 = plt.subplots(figsize=(5, 7))
location = 0
sf_s = [8, 10, 12, 8, 10, 12]
bw_s = [125, 125, 125, 250, 250, 250]
for i, arr in enumerate(etx_arr):
time_on_air = 8 * packet_length - 4 * sf_s[i] + 28 + 16
ldr_opt = (sf_s[i] == 11 and bw_s[i] == 125) or (sf_s[i] == 12)
time_on_air /= (4 * sf_s[i] - 2) if ldr_opt else (4 * sf_s[i])
time_on_air = ceil(time_on_air) if (time_on_air > 0) else 0
time_on_air = time_on_air * (2 + 4) + 8;
symbol_duration = (1 << sf_s[i]) / bw_s[i]
time_on_air *= symbol_duration
time_on_air += 12.25 * symbol_duration
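            # Illustrative sanity check (values chosen here, not from the data):
            # for SF 8, BW 125 kHz and a 16-byte payload the steps above give
            # ceil((8*16 - 4*8 + 28 + 16) / (4*8)) = 5, 5 * (2 + 4) + 8 = 38
            # payload symbols, a symbol duration of 2**8 / 125 = 2.048 ms, and
            # a time-on-air of about 38 * 2.048 + 12.25 * 2.048 ≈ 103 ms
            # (the fixed (2 + 4) factor corresponds to coding rate 4/6).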
print("time-on-air: " + str(time_on_air))
comb_arr = []
for x in arr:
# time for one transmission (first transmission is duty cycle penalty free) +
# additional time for waiting in compliance with duty cycle limitations (for 869.5 MHz)
if x != 0:
comb_arr.append(time_on_air + ((x - 1) * time_on_air * 10))
np_arr = np.array(comb_arr)
mean = 0
std = 0
if np_arr.size != 0:
mean = np.mean(np_arr)
std = np.std(np_arr)
if np_arr.size < 3:
std = 100000 # 'infinity' error bar if some subexperiments received nothing (for them it would take infinitely long to receive a message), but others did
if mean == 0:
ax2.bar(location, mean, align='center', ecolor='black',
label=leg_labels[location], capsize=7, color=colors_RSSI_scenario[location])
else:
ax2.bar(location, mean, yerr=std, align='center', ecolor='black',
label=leg_labels[location], capsize=7, color=colors_RSSI_scenario[location])
if np_arr.size < 3 and np_arr.size != 0: # change color for 'infinite' error bar to distinguish from other error bars going over whole screen
ax2.bar(location, mean,error_kw = dict(ecolor='#a89894', lw=2, capsize=4, capthick=1), yerr=std, align='center', ecolor='black',
capsize=7, color=colors_RSSI_scenario[location])
location += 1
ax2.set_yscale('linear')
ax2.set_ylim(0, 2000)
ax2.set_ylabel('Time for successful transmission in ms', fontsize=20)
        ax2.set_xticks(np.arange(len(etx_arr)))
empty_string_labels = ['8/1', '10/1', '12/1', '8/2', '10/2', '12/2']
ax2.set_xticklabels(empty_string_labels)
ax2.tick_params(axis='both', labelsize=16)
ax2.tick_params(direction='in', bottom=True, left=True, right=True, top=True, width=2)
plt.setp(ax2.spines.values(), linewidth=2)
plt.tight_layout()
plt.legend(loc='upper right', numpoints=1, framealpha=0.4, prop={'size': 7})
plt.savefig('plots/30_cityL_etx2.png', bbox_inches="tight", dpi=300)
plt.show()
etx_arr = [[], [], [], [], [], []]
##############################################################
######### plot tables ##################################
if mode == modes[6]:
row_category = 0
exp_counter += 1
label_exp_counter.config(text='choose\n' + str(exp_counter + 1) + '. ' + row_categories[row_category], font=("Arial", 12))
info_arrays.append(exp_params_arr)
if exp_counter == 4:
exp_counter = 0
label_exp_counter.config(text=str(exp_counter))
row_labels = []
for i, arr in enumerate(info_arrays):
row_labels.append(row_categories[row_category] + ' ' + str(i + 1))
row_colors = plt.cm.BuPu(np.full(len(row_labels), 0.1))
col_colors = plt.cm.BuPu(np.full(len(column_labels), 0.1))
plt.figure(linewidth=2, figsize=(15,2))
table = plt.table(cellText=info_arrays, rowLabels=row_labels, rowColours=row_colors,
rowLoc='right', colColours=col_colors, colLabels=column_labels,
loc='center')
table.scale(1, 1.5)
ax = plt.gca()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.box(on=None)
plt.savefig('plots/table_test.png', bbox_inches="tight", dpi=300)
plt.show()
# triggered when choosing a mode from drop down menu
def set_mode(selection):
global mode
mode = selection
if mode != modes[0] and mode != modes[4]:
label_exp_counter.config(text='choose\n' + str(exp_counter + 1) + '. experiment', font=("Arial", 12))
else:
label_exp_counter.config(text='--', font=("Arial", 22))
if mode == modes[6]:
global info_arrays
info_arrays = []
# GUI dropdown experiments
from tkinter import font as tkFont
# experiment drop down menu
drop_down = tk.OptionMenu(frame, drop_down_content, *OPTIONS, command=process_data)
helv20 = tkFont.Font(family='Helvetica', size=14)
drop_down.config(font=helv20)
drop_down.place(rely=0.5, relx=0.01, relwidth=0.57, relheight=0.5)
OPTIONS2 = []
# mode drop down menu
for x in range(0, len(modes)):
OPTIONS2.append(modes[x])
drop_down_content2 = tk.StringVar(frame)
drop_down_content2.set(OPTIONS2[0]) # default value
drop_down_mode = tk.OptionMenu(frame, drop_down_content2, *OPTIONS2, command=set_mode)
helv20 = tkFont.Font(family='Helvetica', size=14)
drop_down_mode.config(font=helv20)
drop_down_mode.place(rely=0.5, relx=0.59, relwidth=0.25, relheight=0.5)
root.mainloop()
|
py | 1a5241b2f3bcfae4c48fa65becd16a80784ceeca | #!/usr/bin/python3
"""Test textlib module."""
#
# (C) Pywikibot team, 2011-2022
#
# Distributed under the terms of the MIT license.
#
import codecs
import functools
import os
import re
import unittest
from collections import OrderedDict
from contextlib import suppress
from unittest import mock
import pywikibot
from pywikibot import textlib
from pywikibot.backports import nullcontext
from pywikibot.exceptions import UnknownSiteError
from pywikibot.site._interwikimap import _IWEntry
from pywikibot.textlib import MultiTemplateMatchBuilder, extract_sections
from pywikibot.tools import has_module
from tests.aspects import (
DefaultDrySiteTestCase,
SiteAttributeTestCase,
TestCase,
require_modules,
)
files = {}
dirname = os.path.join(os.path.dirname(__file__), 'pages')
for f in ['enwiki_help_editing']:
with codecs.open(os.path.join(dirname, f + '.page'),
'r', 'utf-8') as content:
files[f] = content.read()
class TestSectionFunctions(TestCase):
"""Test wikitext section handling function."""
net = False
def setUp(self):
"""Setup tests."""
self.catresult1 = '[[Category:Cat1]]\n[[Category:Cat2]]\n'
super().setUp()
@staticmethod
def contains(fn, sn):
"""Invoke does_text_contain_section()."""
return textlib.does_text_contain_section(
files[fn], sn)
def assertContains(self, fn, sn, *args, **kwargs):
"""Test that files[fn] contains sn."""
self.assertEqual(self.contains(fn, sn), True, *args, **kwargs)
def assertNotContains(self, fn, sn, *args, **kwargs):
"""Test that files[fn] does not contain sn."""
self.assertEqual(self.contains(fn, sn), False, *args, **kwargs)
def testCurrentBehaviour(self):
"""Test that 'Editing' is found."""
self.assertContains('enwiki_help_editing', 'Editing')
def testSpacesInSection(self):
"""Test with spaces in section."""
self.assertContains('enwiki_help_editing', 'Minor_edits')
self.assertNotContains('enwiki_help_editing', '#Minor edits',
"Incorrect, '#Minor edits' does not work")
self.assertNotContains('enwiki_help_editing', 'Minor Edits',
'section hashes are case-sensitive')
self.assertNotContains('enwiki_help_editing', 'Minor_Edits',
'section hashes are case-sensitive')
@unittest.expectedFailure # TODO: T133276
def test_encoded_chars_in_section(self):
"""Test encoded chars in section."""
self.assertContains(
'enwiki_help_editing', 'Talk_.28discussion.29_pages',
'As used in the TOC')
def test_underline_characters_in_section(self):
"""Test with underline chars in section."""
self.assertContains('enwiki_help_editing', 'Talk_(discussion)_pages',
'Understood by mediawiki')
def test_spaces_outside_section(self):
"""Test with spaces around section."""
self.assertContains('enwiki_help_editing', 'Naming and_moving')
self.assertContains('enwiki_help_editing', ' Naming and_moving ')
self.assertContains('enwiki_help_editing', ' Naming and_moving_')
def test_link_in_section(self):
"""Test with link inside section."""
# section is ==[[Wiki markup]]==
self.assertContains('enwiki_help_editing', '[[Wiki markup]]',
'Link as section header')
self.assertContains('enwiki_help_editing', '[[:Wiki markup]]',
'section header link with preleading colon')
self.assertNotContains('enwiki_help_editing', 'Wiki markup',
'section header must be a link')
# section is ===[[:Help]]ful tips===
self.assertContains('enwiki_help_editing', '[[Help]]ful tips',
'Containing link')
self.assertContains('enwiki_help_editing', '[[:Help]]ful tips',
'Containing link with preleading colon')
self.assertNotContains('enwiki_help_editing', 'Helpful tips',
'section header must contain a link')
class TestFormatInterwiki(TestCase):
"""Test format functions."""
family = 'wikipedia'
code = 'en'
cached = True
def test_interwiki_format_Page(self):
"""Test formatting interwiki links using Page instances."""
interwikis = {
'de': pywikibot.Page(pywikibot.Link('de:German', self.site)),
'fr': pywikibot.Page(pywikibot.Link('fr:French', self.site))
}
self.assertEqual('[[de:German]]\n[[fr:French]]\n',
textlib.interwikiFormat(interwikis, self.site))
def test_interwiki_format_Link(self):
"""Test formatting interwiki links using Page instances."""
interwikis = {
'de': pywikibot.Link('de:German', self.site),
'fr': pywikibot.Link('fr:French', self.site),
}
self.assertEqual('[[de:German]]\n[[fr:French]]\n',
textlib.interwikiFormat(interwikis, self.site))
class TestFormatCategory(DefaultDrySiteTestCase):
"""Test category formatting."""
catresult = '[[Category:Cat1]]\n[[Category:Cat2]]\n'
def test_category_format_raw(self):
"""Test formatting categories as strings formatted as links."""
self.assertEqual(self.catresult,
textlib.categoryFormat(['[[Category:Cat1]]',
'[[Category:Cat2]]'],
self.site))
def test_category_format_bare(self):
"""Test formatting categories as strings."""
self.assertEqual(self.catresult,
textlib.categoryFormat(['Cat1', 'Cat2'], self.site))
def test_category_format_Category(self):
"""Test formatting categories as Category instances."""
data = [pywikibot.Category(self.site, 'Cat1'),
pywikibot.Category(self.site, 'Cat2')]
self.assertEqual(self.catresult,
textlib.categoryFormat(data, self.site))
def test_category_format_Page(self):
"""Test formatting categories as Page instances."""
data = [pywikibot.Page(self.site, 'Category:Cat1'),
pywikibot.Page(self.site, 'Category:Cat2')]
self.assertEqual(self.catresult,
textlib.categoryFormat(data, self.site))
class TestAddText(DefaultDrySiteTestCase):
"""Test add_text function."""
def test_add_text(self):
"""Test adding text."""
self.assertEqual(
textlib.add_text('foo\n[[Category:Foo]]', 'bar', site=self.site),
'foo\nbar\n\n[[Category:Foo]]'
)
class TestCategoryRearrangement(DefaultDrySiteTestCase):
"""
Ensure that sorting keys are not being lost.
Tests .getCategoryLinks() and .replaceCategoryLinks(),
with both a newline and an empty string as separators.
"""
old = '[[Category:Cat1]]\n[[Category:Cat2|]]\n' \
'[[Category:Cat1| ]]\n[[Category:Cat2|key]]'
def test_standard_links(self):
"""Test getting and replacing categories."""
cats = textlib.getCategoryLinks(self.old, site=self.site)
new = textlib.replaceCategoryLinks(self.old, cats, site=self.site)
self.assertEqual(self.old, new)
def test_indentation(self):
"""Test indentation from previous block."""
# Block of text
old = 'Some text\n\n' + self.old
cats = textlib.getCategoryLinks(old, site=self.site)
new = textlib.replaceCategoryLinks(old, cats, site=self.site)
self.assertEqual(old, new)
# DEFAULTSORT
old_ds = '{{DEFAULTSORT:key}}\n' + self.old
cats_ds = textlib.getCategoryLinks(old_ds, site=self.site)
new_ds = textlib.replaceCategoryLinks(old_ds, cats_ds, site=self.site)
self.assertEqual(old_ds, new_ds)
def test_in_place_replace(self):
"""Test in-place category change is reversible."""
dummy = pywikibot.Category(self.site, 'foo')
dummy.sortKey = 'bah'
cats = textlib.getCategoryLinks(self.old, site=self.site)
for count, cat in enumerate(textlib.getCategoryLinks(self.old,
site=self.site)):
with self.subTest(category=cat):
# Sanity checking
temp = textlib.replaceCategoryInPlace(self.old, cat, dummy,
site=self.site)
self.assertNotEqual(temp, self.old)
new = textlib.replaceCategoryInPlace(temp, dummy, cat,
site=self.site)
self.assertEqual(self.old, new)
self.assertEqual(count, 3)
# Testing removing categories
temp = textlib.replaceCategoryInPlace(self.old, cats[0],
None, site=self.site)
self.assertNotEqual(temp, self.old)
temp_cats = textlib.getCategoryLinks(temp, site=self.site)
self.assertNotIn(cats[0], temp_cats)
# First and third categories are the same
self.assertEqual([cats[1], cats[3]], temp_cats)
# Testing adding categories
temp = textlib.replaceCategoryInPlace(
self.old, cats[0], cats[1], site=self.site,
add_only=True)
self.assertNotEqual(temp, self.old)
temp_cats = textlib.getCategoryLinks(temp, site=self.site)
self.assertEqual([cats[0], cats[1], cats[1],
cats[2], cats[1], cats[3]], temp_cats)
new_cats = textlib.getCategoryLinks(new, site=self.site)
self.assertEqual(cats, new_cats)
def test_in_place_retain_sort(self):
"""Test in-place category change does not alter the sortkey."""
# sort key should be retained when the new cat sortKey is None
dummy = pywikibot.Category(self.site, 'foo')
self.assertIsNone(dummy.sortKey)
cats = textlib.getCategoryLinks(self.old, site=self.site)
self.assertEqual(cats[3].sortKey, 'key')
orig_sortkey = cats[3].sortKey
temp = textlib.replaceCategoryInPlace(self.old, cats[3],
dummy, site=self.site)
self.assertNotEqual(self.old, temp)
new_dummy = textlib.getCategoryLinks(temp, site=self.site)[3]
self.assertIsNotNone(new_dummy.sortKey)
self.assertEqual(orig_sortkey, new_dummy.sortKey)
class TestTemplatesInCategory(TestCase):
"""Tests to verify that templates in category links are handled."""
family = 'wikipedia'
code = 'en'
cached = True
def test_templates(self):
"""Test normal templates inside category links."""
self.site = self.get_site()
self.assertEqual(textlib.getCategoryLinks(
'[[Category:{{P1|Foo}}]]', self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:Foo{{!}}bar]][[Category:Wiki{{P2||pedia}}]]',
self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='bar'),
pywikibot.page.Category(self.site, 'Wikipedia')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:Foo{{!}}and{{!}}bar]]', self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='and|bar')])
for pattern in ('[[Category:{{P1|Foo}}|bar]]',
'[[Category:{{P1|{{P2|L33t|Foo}}}}|bar]]',
'[[Category:Foo{{!}}bar]]'):
with self.subTest(pattern=pattern):
self.assertEqual(textlib.getCategoryLinks(
pattern, self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo',
sort_key='bar')])
with mock.patch.object(pywikibot, 'warning', autospec=True) as warn:
textlib.getCategoryLinks('[[Category:nasty{{{!}}]]', self.site)
warn.assert_called_once_with(
'Invalid category title extracted: nasty{{{!}}')
class TestTemplateParams(TestCase):
"""Test to verify that template params extraction works."""
net = False
def _common_results(self, func):
"""Common cases."""
self.assertEqual(func('{{a}}'), [('a', OrderedDict())])
self.assertEqual(func('{{ a}}'), [('a', OrderedDict())])
self.assertEqual(func('{{a }}'), [('a', OrderedDict())])
self.assertEqual(func('{{ a }}'), [('a', OrderedDict())])
self.assertEqual(func('{{a|b=c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b|c=d}}'),
[('a', OrderedDict((('1', 'b'), ('c', 'd'))))])
self.assertEqual(func('{{a|b=c|f=g|d=e|1=}}'),
[('a', OrderedDict((('b', 'c'), ('f', 'g'),
('d', 'e'), ('1', ''))))])
self.assertEqual(func('{{a|1=2|c=d}}'),
[('a', OrderedDict((('1', '2'), ('c', 'd'))))])
self.assertEqual(func('{{a|c=d|1=2}}'),
[('a', OrderedDict((('c', 'd'), ('1', '2'))))])
self.assertEqual(func('{{a|5=d|a=b}}'),
[('a', OrderedDict((('5', 'd'), ('a', 'b'))))])
self.assertEqual(func('{{a|=2}}'),
[('a', OrderedDict((('', '2'), )))])
self.assertEqual(func('{{a|}}'),
[('a', OrderedDict((('1', ''), )))])
self.assertEqual(func('{{a|=|}}'),
[('a', OrderedDict((('', ''), ('1', ''))))])
self.assertEqual(func('{{a||}}'),
[('a', OrderedDict((('1', ''), ('2', ''))))])
self.assertEqual(func('{{a|b={{{1}}}}}'),
[('a', OrderedDict((('b', '{{{1}}}'), )))])
self.assertEqual(func('{{a|b=<noinclude>{{{1}}}</noinclude>}}'),
[('a',
OrderedDict((('b',
'<noinclude>{{{1}}}</noinclude>'),
)))])
self.assertEqual(func('{{Template:a|b=c}}'),
[('Template:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{template:a|b=c}}'),
[('template:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{:a|b=c}}'),
[(':a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b={{{1}}}|c={{{2}}}}}'),
[('a', OrderedDict((('b', '{{{1}}}'),
('c', '{{{2}}}'))))])
self.assertEqual(func('{{a|b=c}}{{d|e=f}}'),
[('a', OrderedDict((('b', 'c'), ))),
('d', OrderedDict((('e', 'f'), )))])
# initial '{' and '}' should be ignored as outer wikitext
self.assertEqual(func('{{{a|b}}X}'),
[('a', OrderedDict((('1', 'b'), )))])
# sf.net bug 1575: unclosed template
self.assertEqual(func('{{a'), [])
self.assertEqual(func('{{a}}{{foo|'), [('a', OrderedDict())])
def _unstripped(self, func):
"""Common cases of unstripped results."""
self.assertEqual(func('{{a|b=<!--{{{1}}}-->}}'),
[('a', OrderedDict((('b', '<!--{{{1}}}-->'), )))])
self.assertEqual(func('{{a| }}'),
[('a', OrderedDict((('1', ' '), )))])
self.assertEqual(func('{{a| | }}'),
[('a', OrderedDict((('1', ' '), ('2', ' '))))])
self.assertEqual(func('{{a| =|}}'),
[('a', OrderedDict(((' ', ''), ('1', ''))))])
self.assertEqual(func('{{a| b=c}}'),
[('a', OrderedDict(((' b', 'c'), )))])
self.assertEqual(func('{{a|b =c}}'),
[('a', OrderedDict((('b ', 'c'), )))])
self.assertEqual(func('{{a|b= c}}'),
[('a', OrderedDict((('b', ' c'), )))])
self.assertEqual(func('{{a|b=c }}'),
[('a', OrderedDict((('b', 'c '), )))])
self.assertEqual(func('{{a| foo |2= bar }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' bar '))))])
# The correct entry 'bar' is removed
self.assertEqual(func('{{a| foo |2= bar | baz }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' baz '))))])
# However whitespace prevents the correct item from being removed
self.assertEqual(func('{{a| foo | 2 = bar | baz }}'),
[('a', OrderedDict((('1', ' foo '), (' 2 ', ' bar '),
('2', ' baz '))))])
def _stripped(self, func):
"""Common cases of stripped results."""
self.assertEqual(func('{{a| }}'),
[('a', OrderedDict((('1', ' '), )))])
self.assertEqual(func('{{a| | }}'),
[('a', OrderedDict((('1', ' '), ('2', ' '))))])
self.assertEqual(func('{{a| =|}}'),
[('a', OrderedDict((('', ''), ('1', ''))))])
self.assertEqual(func('{{a| b=c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b =c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b= c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b=c }}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a| foo |2= bar }}'),
[('a', OrderedDict((('1', ' foo '), ('2', 'bar'))))])
# 'bar' is always removed
self.assertEqual(func('{{a| foo |2= bar | baz }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' baz '))))])
self.assertEqual(func('{{a| foo | 2 = bar | baz }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' baz '))))])
def _etp_regex_differs(self, func):
"""Common cases not handled the same by ETP_REGEX."""
# inner {} should be treated as part of the value
self.assertEqual(func('{{a|b={} }}'),
[('a', OrderedDict((('b', '{} '), )))])
def _order_differs(self, func):
"""Common cases where the order of templates differs."""
self.assertCountEqual(func('{{a|b={{c}}}}'),
[('a', OrderedDict((('b', '{{c}}'), ))),
('c', OrderedDict())])
self.assertCountEqual(func('{{a|{{c|d}}}}'),
[('c', OrderedDict((('1', 'd'), ))),
('a', OrderedDict([('1', '{{c|d}}')]))])
# inner '}' after {{b|c}} should be treated as wikitext
self.assertCountEqual(func('{{a|{{b|c}}}|d}}'),
[('a', OrderedDict([('1', '{{b|c}}}'),
('2', 'd')])),
('b', OrderedDict([('1', 'c')]))])
def _mwpfh_passes(self, func):
"""Common cases failing with wikitextparser but passes with mwpfh.
Probably the behaviour of regex or mwpfh is wrong.
"""
failing = has_module('wikitextparser')
patterns = [
'{{subst:a|b=c}}',
'{{safesubst:a|b=c}}',
'{{msgnw:a|b=c}}',
'{{subst::a|b=c}}'
]
context = self.assertRaises(AssertionError) \
if failing else nullcontext()
for template in patterns:
with self.subTest(template=template, failing=failing):
name = template.strip('{}').split('|')[0]
with context:
self.assertEqual(func(template),
[(name, OrderedDict((('b', 'c'), )))])
@require_modules('mwparserfromhell')
def test_extract_templates_params_mwpfh(self):
"""Test using mwparserfromhell."""
func = textlib.extract_templates_and_params
self._common_results(func)
self._order_differs(func)
self._unstripped(func)
self._etp_regex_differs(func)
self._mwpfh_passes(func)
self.assertCountEqual(func('{{a|{{c|{{d}}}}}}'),
[('c', OrderedDict((('1', '{{d}}'), ))),
('a', OrderedDict([('1', '{{c|{{d}}}}')])),
('d', OrderedDict())
])
self.assertCountEqual(func('{{a|{{c|{{d|}}}}}}'),
[('c', OrderedDict((('1', '{{d|}}'), ))),
('a', OrderedDict([('1', '{{c|{{d|}}}}')])),
('d', OrderedDict([('1', '')]))
])
@require_modules('mwparserfromhell')
def test_extract_templates_params_parser_stripped(self):
"""Test using mwparserfromhell with stripping."""
func = functools.partial(textlib.extract_templates_and_params,
strip=True)
self._common_results(func)
self._order_differs(func)
self._stripped(func)
@require_modules('wikitextparser')
def test_extract_templates_params_parser(self):
"""Test using wikitextparser."""
func = textlib.extract_templates_and_params
self._common_results(func)
self._order_differs(func)
self._unstripped(func)
self._etp_regex_differs(func)
self._mwpfh_passes(func)
self.assertCountEqual(func('{{a|{{c|{{d}}}}}}'),
[('c', OrderedDict((('1', '{{d}}'), ))),
('a', OrderedDict([('1', '{{c|{{d}}}}')])),
('d', OrderedDict())
])
self.assertCountEqual(func('{{a|{{c|{{d|}}}}}}'),
[('c', OrderedDict((('1', '{{d|}}'), ))),
('a', OrderedDict([('1', '{{c|{{d|}}}}')])),
('d', OrderedDict([('1', '')]))
])
@require_modules('mwparserfromhell')
def test_extract_templates_params(self):
"""Test that the normal entry point works."""
func = functools.partial(textlib.extract_templates_and_params,
remove_disabled_parts=False, strip=False)
self._common_results(func)
self._unstripped(func)
func = functools.partial(textlib.extract_templates_and_params,
remove_disabled_parts=False, strip=True)
self._common_results(func)
self._stripped(func)
def test_template_simple_regex(self):
"""Test using simple regex."""
func = textlib.extract_templates_and_params_regex_simple
self._common_results(func)
self._etp_regex_differs(func)
# The simple regex copies the whitespace of mwpfh, but does
# not have additional entries for nested templates.
self.assertEqual(func('{{a| b={{c}}}}'),
[('a', OrderedDict(((' b', '{{c}}'), )))])
self.assertEqual(func('{{a|b={{c}}}}'),
[('a', OrderedDict((('b', '{{c}}'), )))])
self.assertEqual(func('{{a|b= {{c}}}}'),
[('a', OrderedDict((('b', ' {{c}}'), )))])
self.assertEqual(func('{{a|b={{c}} }}'),
[('a', OrderedDict((('b', '{{c}} '), )))])
# These three are from _order_differs, and while the first works
self.assertEqual(func('{{a|{{c}} }}'),
[('a', OrderedDict((('1', '{{c}} '), )))])
# an inner '|' causes extract_template_and_params_regex_simple to
# split arguments incorrectly in the next two cases.
self.assertEqual(func('{{a|{{c|d}} }}'),
[('a', OrderedDict([('1', '{{c'),
('2', 'd}} ')]))])
self.assertEqual(func('{{a|{{b|c}}}|d}}'),
[('a', OrderedDict([('1', '{{b'),
('2', 'c}}}'),
('3', 'd')]))])
# Safe fallback to handle arbitrary template levels
# by merging top level templates together.
# i.e. 'b' is not recognised as a template, and 'foo' is also
# consumed as part of 'a'.
self.assertEqual(func('{{a|{{c|{{d|{{e|}}}} }} }} foo {{b}}'),
[(None, OrderedDict())])
def test_nested_template_regex_search(self):
"""Test NESTED_TEMPLATE_REGEX search."""
func = textlib.NESTED_TEMPLATE_REGEX.search
# Numerically named templates are rejected
self.assertIsNone(func('{{1}}'))
self.assertIsNone(func('{{#if:foo}}'))
self.assertIsNone(func('{{{1}}}'))
self.assertIsNone(func('{{{1|}}}'))
self.assertIsNone(func('{{{15|a}}}'))
self.assertIsNone(func('{{{1|{{{2|a}}} }}}'))
self.assertIsNone(func('{{{1|{{2|a}} }}}'))
def test_nested_template_regex_match(self):
"""Test NESTED_TEMPLATE_REGEX match."""
func = textlib.NESTED_TEMPLATE_REGEX.match
self.assertIsNotNone(func('{{CURRENTYEAR}}'))
self.assertIsNotNone(func('{{foo:bar}}'))
self.assertIsNone(func('{{1}}'))
self.assertIsNotNone(func('{{a|b={{CURRENTYEAR}} }}'))
self.assertIsNotNone(func('{{a|b={{{1}}} }}'))
self.assertIsNotNone(func('{{a|b={{c}} }}'))
self.assertIsNotNone(func('{{a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|b={} }}'))
self.assertIsNotNone(func('{{:a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|{{c}} }}'))
self.assertIsNotNone(func('{{a|{{c|d}} }}'))
# All templates are captured when template depth is greater than 2
patterns = '{{a|{{c|{{d|}} }} | foo = bar }} foo {{bar}} baz', \
'{{a|\n{{c|{{d|}} }}\n| foo = bar }} foo {{bar}} baz'
for pattern in patterns:
m = func(pattern)
self.assertIsNotNone(m)
self.assertIsNotNone(m.group(0))
self.assertIsNone(m.group('name'))
self.assertIsNone(m.group(1))
self.assertIsNone(m.group('params'))
self.assertIsNone(m.group(2))
self.assertIsNotNone(m.group('unhandled_depth'))
self.assertTrue(m.group(0).endswith('foo {{bar}}'))
class TestDisabledParts(DefaultDrySiteTestCase):
"""Test the removeDisabledParts function in textlib."""
def test_remove_disabled_parts(self):
"""Test removeDisabledParts function."""
tests = {
'comment': '<!-- No comment yet -->',
'link': '[[Target link]]',
'source': '<source>foo := bar</source>',
'template': '{{Infobox\n|foo = bar}}',
'unknown': '<Unknown>This is an unknown pattern</unKnown>',
}
for test, pattern in tests.items():
with self.subTest(test=test):
self.assertEqual(
textlib.removeDisabledParts(pattern, tags=[test]), '')
def test_remove_disabled_parts_include(self):
"""Test removeDisabledParts function with the include argument."""
text = 'text <nowiki>tag</nowiki> text'
self.assertEqual(
textlib.removeDisabledParts(text, include=['nowiki']), text)
def test_remove_disabled_parts_order(self):
"""Test the order of the replacements in removeDisabledParts."""
text = 'text <ref>This is a reference.</ref> text'
regex = re.compile('</?ref>')
self.assertEqual(
textlib.removeDisabledParts(text, tags=['ref', regex]),
'text text')
self.assertEqual(
textlib.removeDisabledParts(text, tags=[regex, 'ref']),
'text This is a reference. text')
class TestReplaceLinks(TestCase):
"""Test the replace_links function in textlib."""
sites = {
'wt': {
'family': 'wiktionary',
'code': 'en',
},
'wp': {
'family': 'wikipedia',
'code': 'en',
}
}
dry = True
text = ('Hello [[World]], [[how|are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
@classmethod
def setUpClass(cls):
"""Create a fake interwiki cache."""
super().setUpClass()
# make APISite.interwiki work and prevent it from doing requests
for site in cls.sites.values():
mapping = {}
for iw in cls.sites.values():
mapping[iw['family']] = _IWEntry(True, 'invalid')
mapping[iw['family']]._site = iw['site']
mapping['bug'] = _IWEntry(False, 'invalid')
mapping['bug']._site = UnknownSiteError('Not a wiki')
mapping['en'] = _IWEntry(True, 'invalid')
mapping['en']._site = site['site']
site['site']._interwikimap._map = mapping
site['site']._interwikimap._site = None # prevent it from loading
cls.wp_site = cls.get_site('wp')
def test_replacements_function(self):
"""Test a dynamic function as the replacements."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
if link.title == 'World':
return pywikibot.Link('Homeworld', link.site)
if link.title.lower() == 'you':
return False
return None
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello [[Homeworld]], [[how|are]] you? Are you a [[bug:1337]]?')
def test_replacements_once(self):
"""Test dynamic replacement."""
def callback(link, text, groups, rng):
if link.title.lower() == 'you':
self._count += 1
if link.section:
return pywikibot.Link(
'{0}#{1}'
.format(self._count, link.section), link.site)
return pywikibot.Link('{0}'.format(self._count), link.site)
return None
self._count = 0 # buffer number of found instances
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello [[World]], [[how|are]] [[1#section]]? Are [[2]] a '
'[[bug:1337]]?')
del self._count
def test_unlink_all(self):
"""Test unlinking."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
return False
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello World, are you? Are you a [[bug:1337]]?')
def test_unlink_some(self):
"""Test unlinking only some links."""
self.assertEqual(
textlib.replace_links(self.text, ('World', False), self.wp_site),
'Hello World, [[how|are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
self.assertEqual(
textlib.replace_links('[[User:Namespace|Label]]\n'
'[[User:Namespace#Section|Labelz]]\n'
'[[Nothing]]',
('User:Namespace', False),
self.wp_site),
'Label\nLabelz\n[[Nothing]]')
def test_replace_neighbour(self):
"""Test that it replaces two neighbouring links."""
self.assertEqual(
textlib.replace_links('[[A]][[A]][[C]]',
('A', 'B'),
self.wp_site),
'[[B|A]][[B|A]][[C]]')
def test_replacements_simplify(self):
"""Test a tuple as replacement removing the need for a piped link."""
self.assertEqual(
textlib.replace_links(self.text,
('how', 'are'),
self.wp_site),
'Hello [[World]], [[are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
def test_replace_file(self):
"""Test that it respects the namespace."""
self.assertEqual(
textlib.replace_links(
'[[File:Meh.png|thumb|Description of [[fancy]]]] '
'[[Fancy]]...', ('File:Meh.png', 'File:Fancy.png'),
self.wp_site),
'[[File:Fancy.png|thumb|Description of [[fancy]]]] [[Fancy]]...')
def test_replace_strings(self):
"""Test if strings can be used."""
self.assertEqual(
textlib.replace_links(self.text, ('how', 'are'), self.wp_site),
'Hello [[World]], [[are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
def test_replace_invalid_link_text(self):
"""Test that it doesn't pipe a link when it's an invalid link."""
self.assertEqual(
textlib.replace_links('[[Target|Foo:]]', ('Target', 'Foo'),
self.wp_site), '[[Foo|Foo:]]')
def test_replace_modes(self):
"""Test replacing with or without label and section."""
source_text = '[[Foo#bar|baz]]'
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar'), self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site, 'Bar')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar', self.wp_site)),
self.wp_site),
'[[Bar]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar#snafu'),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site,
'Bar#snafu')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar#snafu',
self.wp_site)),
self.wp_site),
'[[Bar#snafu]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar|foo'),
self.wp_site), '[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site,
'Bar|foo')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar|foo',
self.wp_site)),
self.wp_site),
'[[Bar|foo]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar#snafu|foo'),
self.wp_site), '[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site,
'Bar#snafu|foo')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar#snafu|foo',
self.wp_site)),
self.wp_site),
'[[Bar#snafu|foo]]')
def test_replace_different_case(self):
"""Test that it uses piped links when the case is different."""
source_text = '[[Foo|Bar]] and [[Foo|bar]]'
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'bar'),
self.get_site('wp')),
'[[Bar]] and [[bar]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'bar'),
self.get_site('wt')),
'[[bar|Bar]] and [[bar]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar'),
self.get_site('wt')),
'[[Bar]] and [[Bar|bar]]')
@unittest.expectedFailure
def test_label_diff_namespace(self):
"""Test that it uses the old label when the new doesn't match."""
# These tests require to get the actual part which is before the title
# (interwiki and namespace prefixes) which could be then compared
# case insensitive.
self.assertEqual(
textlib.replace_links('[[Image:Foobar]]',
('File:Foobar', 'File:Foo'), self.wp_site),
'[[File:Foo|Image:Foobar]]')
self.assertEqual(
textlib.replace_links('[[en:File:Foobar]]',
('File:Foobar', 'File:Foo'), self.wp_site),
'[[File:Foo|en:File:Foobar]]')
def test_linktrails(self):
"""Test that the linktrails are used or applied."""
self.assertEqual(
textlib.replace_links('[[Foobar]]', ('Foobar', 'Foo'),
self.wp_site),
'[[Foo]]bar')
self.assertEqual(
textlib.replace_links('[[Talk:test]]s',
('Talk:Test', 'Talk:Tests'), self.wp_site),
'[[Talk:tests]]')
self.assertEqual(
textlib.replace_links('[[Talk:test]]s',
('Talk:Test', 'Project:Tests'),
self.wp_site),
'[[Project:Tests|Talk:tests]]')
def test_unicode_callback(self):
"""Test returning unicode in the callback."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
if link.title == 'World':
# This must be a unicode instance not bytes
return 'homewörlder'
return None
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello homewörlder, [[how|are]] [[you#section|you]]? '
'Are [[you]] a [[bug:1337]]?')
def test_bytes_callback(self):
"""Test returning bytes in the callback."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
if link.title == 'World':
# This must be a bytes instance not unicode
return b'homeworlder'
return None
with self.assertRaisesRegex(ValueError,
r'The result must be str and not bytes\.'):
textlib.replace_links(self.text, callback, self.wp_site)
def test_replace_interwiki_links(self):
"""Make sure interwiki links cannot be replaced."""
link = '[[fr:how]]'
self.assertEqual(
textlib.replace_links(link, ('fr:how', 'de:are'), self.wp_site),
link)
self.assertEqual(
textlib.replace_links(link, (':fr:how', ':de:are'), self.wp_site),
link)
self.assertEqual(
textlib.replace_links(link, ('how', 'de:are'), self.wp_site),
link)
self.assertEqual(
textlib.replace_links(link, ('de:how', 'de:are'), self.wp_site),
link)
class TestReplaceLinksNonDry(TestCase):
"""Test the replace_links function in textlib non-dry."""
family = 'wikipedia'
code = 'en'
cached = True
def test_replace_interlanguage_links(self):
"""Test replacing interlanguage links."""
link = '[[:fr:how]]'
self.assertEqual(
textlib.replace_links(link, (':fr:how', ':de:are'),
self.site),
'[[:de:Are|fr:how]]')
self.assertEqual(
textlib.replace_links(link, ('fr:how', 'de:are'),
self.site),
'[[:de:Are|fr:how]]')
self.assertEqual(
textlib.replace_links(link, ('how', ':de:are'),
self.site),
link)
self.assertEqual(
textlib.replace_links(link, (':de:how', ':de:are'),
self.site),
link)
class TestLocalDigits(TestCase):
"""Test to verify that local digits are correctly being handled."""
net = False
def test_to_local(self):
"""Test converting Latin digits to local digits."""
self.assertEqual(textlib.to_local_digits(299792458, 'en'), 299792458)
self.assertEqual(
textlib.to_local_digits(299792458, 'fa'), '۲۹۹۷۹۲۴۵۸')
self.assertEqual(
textlib.to_local_digits(
'299792458 flash', 'fa'), '۲۹۹۷۹۲۴۵۸ flash')
self.assertEqual(
textlib.to_local_digits(
'299792458', 'km'), '២៩៩៧៩២៤៥៨')
class TestReplaceExcept(DefaultDrySiteTestCase):
"""Test to verify the replacements with exceptions are done correctly."""
def test_no_replace(self):
"""Test replacing when the old text does not match."""
self.assertEqual(textlib.replaceExcept('12345678', 'x', 'y', [],
site=self.site),
'12345678')
def test_simple_replace(self):
"""Test replacing without regex."""
self.assertEqual(textlib.replaceExcept('AxB', 'x', 'y', [],
site=self.site),
'AyB')
self.assertEqual(textlib.replaceExcept('AxxB', 'x', 'y', [],
site=self.site),
'AyyB')
self.assertEqual(textlib.replaceExcept('AxyxB', 'x', 'y', [],
site=self.site),
'AyyyB')
def test_regex_replace(self):
"""Test replacing with a regex."""
self.assertEqual(textlib.replaceExcept('A123B', r'\d', r'x', [],
site=self.site),
'AxxxB')
self.assertEqual(textlib.replaceExcept('A123B', r'\d+', r'x', [],
site=self.site),
'AxB')
self.assertEqual(textlib.replaceExcept('A123B',
r'A(\d)2(\d)B', r'A\1x\2B', [],
site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('', r'(a?)', r'\1B', [], site=self.site),
'B')
self.assertEqual(
textlib.replaceExcept('abc', r'x*', r'-', [], site=self.site),
'-a-b-c-')
# This is different from re.sub() as re.sub() doesn't
# allow None groups
self.assertEqual(
textlib.replaceExcept('', r'(a)?', r'\1\1', [], site=self.site),
'')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(\d)2(\d)B', r'A\g<1>x\g<2>B',
[], site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(?P<a>\d)2(?P<b>\d)B',
r'A\g<a>x\g<b>B', [], site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(?P<a>\d)2(\d)B',
r'A\g<a>x\g<2>B', [], site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(?P<a>\d)2(\d)B',
r'A\g<a>x\2B', [], site=self.site),
'A1x3B')
# test regex with lookbehind.
self.assertEqual(
textlib.replaceExcept('A behindB C', r'(?<=behind)\w',
r'Z', [], site=self.site),
'A behindZ C')
# test regex with lookbehind and groups.
self.assertEqual(
textlib.replaceExcept('A behindB C D', r'(?<=behind)\w( )',
r'\g<1>Z', [], site=self.site),
'A behind ZC D')
# test regex with lookahead.
self.assertEqual(
textlib.replaceExcept('A Bahead C', r'\w(?=ahead)',
r'Z', [], site=self.site),
'A Zahead C')
# test regex with lookahead and groups.
self.assertEqual(
textlib.replaceExcept('A Bahead C D', r'( )\w(?=ahead)',
r'Z\g<1>', [], site=self.site),
'AZ ahead C D')
def test_case_sensitive(self):
"""Test replacing with different case sensitivity."""
self.assertEqual(textlib.replaceExcept('AxB', 'x', 'y', [],
caseInsensitive=False,
site=self.site),
'AyB')
self.assertEqual(textlib.replaceExcept('AxB', 'X', 'y', [],
caseInsensitive=False,
site=self.site),
'AxB')
self.assertEqual(textlib.replaceExcept('AxB', 'x', 'y', [],
caseInsensitive=True,
site=self.site),
'AyB')
self.assertEqual(textlib.replaceExcept('AxB', 'X', 'y', [],
caseInsensitive=True,
site=self.site),
'AyB')
def test_replace_with_marker(self):
"""Test replacing with a marker."""
self.assertEqual(textlib.replaceExcept('AxyxB', 'x', 'y', [],
marker='.',
site=self.site),
'Ayyy.B')
self.assertEqual(textlib.replaceExcept('AxyxB', '1', 'y', [],
marker='.',
site=self.site),
'AxyxB.')
def test_overlapping_replace(self):
"""Test replacing with and without overlap."""
self.assertEqual(textlib.replaceExcept('1111', '11', '21', [],
allowoverlap=False,
site=self.site),
'2121')
self.assertEqual(textlib.replaceExcept('1111', '11', '21', [],
allowoverlap=True,
site=self.site),
'2221')
self.assertEqual(textlib.replaceExcept('1\n= 1 =\n', '1', ' \n= 1 =\n',
['header'],
allowoverlap=True,
site=self.site),
' \n= 1 =\n\n= 1 =\n')
def test_replace_exception(self):
"""Test replacing not inside a specific regex."""
self.assertEqual(textlib.replaceExcept('123x123', '123', '000', [],
site=self.site),
'000x000')
self.assertEqual(textlib.replaceExcept('123x123', '123', '000',
[re.compile(r'\w123')],
site=self.site),
'000x123')
self.assertEqual(
textlib.replaceExcept(
'1\n= 1 =\n', '1', 'verylongreplacement', ['header'],
site=self.site),
'verylongreplacement\n= 1 =\n')
def test_replace_tags(self):
"""Test replacing not inside various tags."""
self.assertEqual(textlib.replaceExcept('A <!-- x --> B', 'x', 'y',
['comment'], site=self.site),
'A <!-- x --> B')
self.assertEqual(textlib.replaceExcept('\n==x==\n', 'x', 'y',
['header'], site=self.site),
'\n==x==\n')
self.assertEqual(textlib.replaceExcept('\n<!--'
'\ncomment-->==x==<!--comment'
'\n-->\n', 'x', 'y',
['header'], site=self.site),
'\n<!--\ncomment-->==x==<!--comment\n-->\n')
self.assertEqual(textlib.replaceExcept('<pre>x</pre>', 'x', 'y',
['pre'], site=self.site),
'<pre>x</pre>')
self.assertEqual(textlib.replaceExcept('<nowiki >x</nowiki >x',
'x', 'y', ['nowiki'],
site=self.site),
'<nowiki >x</nowiki >y') # T191559
self.assertEqual(textlib.replaceExcept('<source lang="xml">x</source>',
'x', 'y', ['source'],
site=self.site),
'<source lang="xml">x</source>')
self.assertEqual(
textlib.replaceExcept('<syntaxhighlight>x</syntaxhighlight>',
'x', 'y', ['source'], site=self.site),
'<syntaxhighlight>x</syntaxhighlight>')
self.assertEqual(
textlib.replaceExcept(
'<syntaxhighlight lang="xml">x</syntaxhighlight>',
'x', 'y', ['source'], site=self.site),
'<syntaxhighlight lang="xml">x</syntaxhighlight>')
self.assertEqual(
textlib.replaceExcept('<source>x</source>',
'x', 'y', ['syntaxhighlight'],
site=self.site),
'<source>x</source>')
self.assertEqual(textlib.replaceExcept('<includeonly>x</includeonly>',
'x', 'y', ['includeonly'],
site=self.site),
'<includeonly>x</includeonly>')
self.assertEqual(textlib.replaceExcept('<ref>x</ref>', 'x', 'y',
['ref'], site=self.site),
'<ref>x</ref>')
self.assertEqual(textlib.replaceExcept('<ref name="x">A</ref>',
'x', 'y',
['ref'], site=self.site),
'<ref name="x">A</ref>')
self.assertEqual(textlib.replaceExcept(' xA ', 'x', 'y',
['startspace'], site=self.site),
' xA ')
self.assertEqual(textlib.replaceExcept(':xA ', 'x', 'y',
['startcolon'], site=self.site),
':xA ')
self.assertEqual(textlib.replaceExcept('<table>x</table>', 'x', 'y',
['table'], site=self.site),
'<table>x</table>')
self.assertEqual(textlib.replaceExcept('x [http://www.sample.com x]',
'x', 'y', ['hyperlink'],
site=self.site),
'y [http://www.sample.com y]')
self.assertEqual(textlib.replaceExcept(
'x http://www.sample.com/x.html', 'x', 'y',
['hyperlink'], site=self.site), 'y http://www.sample.com/x.html')
self.assertEqual(textlib.replaceExcept('<gallery>x</gallery>',
'x', 'y', ['gallery'],
site=self.site),
'<gallery>x</gallery>')
self.assertEqual(textlib.replaceExcept('[[x]]', 'x', 'y', ['link'],
site=self.site),
'[[x]]')
self.assertEqual(textlib.replaceExcept('{{#property:p171}}', '1', '2',
['property'], site=self.site),
'{{#property:p171}}')
self.assertEqual(textlib.replaceExcept('{{#invoke:x}}', 'x', 'y',
['invoke'], site=self.site),
'{{#invoke:x}}')
self.assertEqual(
textlib.replaceExcept(
'<ref name=etwa /> not_in_ref <ref> in_ref </ref>',
'not_in_ref', 'text', ['ref'], site=self.site),
'<ref name=etwa /> text <ref> in_ref </ref>')
self.assertEqual(
textlib.replaceExcept(
'<ab> content </a>', 'content', 'text', ['a'], site=self.site),
'<ab> text </a>')
def test_replace_with_count(self):
"""Test replacing with count argument."""
self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],
site=self.site),
'y [[y]] y y')
self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],
site=self.site, count=5),
'y [[y]] y y')
self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],
site=self.site, count=2),
'y [[y]] x x')
self.assertEqual(textlib.replaceExcept(
'x [[x]] x x', 'x', 'y', ['link'], site=self.site, count=2),
'y [[x]] y x')
def test_replace_tag_category(self):
"""Test replacing not inside category links."""
for ns_name in self.site.namespaces[14]:
self.assertEqual(textlib.replaceExcept('[[{}:x]]'.format(ns_name),
'x', 'y', ['category'],
site=self.site),
'[[{}:x]]'.format(ns_name))
def test_replace_tag_file(self):
"""Test replacing not inside file links."""
for ns_name in self.site.namespaces[6]:
self.assertEqual(textlib.replaceExcept('[[{}:x]]'.format(ns_name),
'x', 'y', ['file'],
site=self.site),
'[[{}:x]]'.format(ns_name))
self.assertEqual(
textlib.replaceExcept(
'[[File:x|foo]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|foo]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|foo|bar x]] x',
'x', 'y', ['file'], site=self.site),
'[[File:x|foo|bar x]] y')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|]][[File:x|foo]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|]][[File:x|foo]]')
self.assertEqual(
textlib.replaceExcept(
'[[NonFile:x]]',
'x', 'y', ['file'], site=self.site),
'[[NonFile:y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:]]',
'File:', 'NonFile:', ['file'], site=self.site),
'[[File:]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|[[foo]].]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|[[foo]].]]')
# ensure only links inside file are captured
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]][[bar]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]][[bar]].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]][[bar]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]][[bar]].x]][[y]]')
# Correctly handle single brackets in the text.
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [bar].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [bar].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[bar] [[foo]] .x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[bar] [[foo]] .x]][[y]]')
def test_replace_tag_file_invalid(self):
"""Test replacing not inside file links with invalid titles."""
# Correctly handle [ and ] inside wikilinks inside file link
# even though these are an invalid title.
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [[bar [invalid] ]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [[bar [invalid] ]].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [[bar [invalid ]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [[bar [invalid ]].x]][[y]]')
@unittest.expectedFailure
def test_replace_tag_file_failure(self):
"""Test showing limits of the file link regex."""
# When the double brackets are unbalanced, the regex
# does not correctly detect the end of the file link.
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [[bar [[invalid ]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [[bar [invalid] ]].x]][[y]]')
def test_replace_tags_interwiki(self):
"""Test replacing not inside interwiki links."""
if ('es' not in self.site.family.langs
or 'ey' in self.site.family.langs):
raise unittest.SkipTest("family {} doesn't have languages"
.format(self.site))
self.assertEqual(textlib.replaceExcept('[[es:s]]', 's', 't',
['interwiki'], site=self.site),
'[[es:s]]') # "es" is a valid interwiki code
self.assertEqual(textlib.replaceExcept('[[ex:x]]', 'x', 'y',
['interwiki'], site=self.site),
'[[ey:y]]') # "ex" is not a valid interwiki code
def test_replace_template(self):
"""Test replacing not inside templates."""
template_sample = (r'a {{templatename '
r' | accessdate={{Fecha|1993}} '
r' |atitle=The [[real title]] }}')
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
template_sample = (r'a {{templatename '
r' | 1={{a}}2{{a}} '
r' | 2={{a}}1{{a}} }}')
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
template_sample = (r'a {{templatename '
r' | 1={{{a}}}2{{{a}}} '
r' | 2={{{a}}}1{{{a}}} }}')
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
# sf.net bug 1575: unclosed template
template_sample = template_sample[:-2]
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
def test_replace_source_reference(self):
"""Test replacing in text which contains back references."""
# Don't use a valid reference number in the original string,
# in case it tries to apply that as a reference.
self.assertEqual(textlib.replaceExcept(r'\42', r'^(.*)$', r'X\1X',
[], site=self.site),
r'X\42X')
self.assertEqual(textlib.replaceExcept(
r'\g<bar>', r'^(?P<foo>.*)$', r'X\g<foo>X', [], site=self.site),
r'X\g<bar>X')
class TestMultiTemplateMatchBuilder(DefaultDrySiteTestCase):
"""Test MultiTemplateMatchBuilder."""
@classmethod
def setUpClass(cls):
"""Cache namespace 10 (Template) case sensitivity."""
super().setUpClass()
cls._template_not_case_sensitive = (
cls.get_site().namespaces.TEMPLATE.case != 'case-sensitive')
def test_no_match(self):
"""Test text without any desired templates."""
string = 'The quick brown fox'
builder = MultiTemplateMatchBuilder(self.site)
self.assertIsNone(re.search(builder.pattern('quick'), string))
def test_match(self):
"""Test text with one match without parameters."""
string = 'The {{quick}} brown fox'
builder = MultiTemplateMatchBuilder(self.site)
self.assertIsNotNone(re.search(builder.pattern('quick'), string))
self.assertEqual(bool(re.search(builder.pattern('Quick'), string)),
self._template_not_case_sensitive)
def test_match_with_params(self):
"""Test text with one match with parameters."""
string = 'The {{quick|brown}} fox'
builder = MultiTemplateMatchBuilder(self.site)
self.assertIsNotNone(re.search(builder.pattern('quick'), string))
self.assertEqual(bool(re.search(builder.pattern('Quick'), string)),
self._template_not_case_sensitive)
def test_match_msg(self):
"""Test text with {{msg:..}}."""
string = 'The {{msg:quick}} brown fox'
builder = MultiTemplateMatchBuilder(self.site)
self.assertIsNotNone(re.search(builder.pattern('quick'), string))
self.assertEqual(bool(re.search(builder.pattern('Quick'), string)),
self._template_not_case_sensitive)
def test_match_template_prefix(self):
"""Test pages with {{template:..}}."""
string = 'The {{%s:%s}} brown fox'
template = 'template'
builder = MultiTemplateMatchBuilder(self.site)
if self._template_not_case_sensitive:
quick_list = ('quick', 'Quick')
else:
quick_list = ('quick', )
for t in (template.upper(), template.lower(), template.title()):
for q in quick_list:
self.assertIsNotNone(re.search(builder.pattern('quick'),
string % (t, q)))
self.assertEqual(bool(re.search(builder.pattern('Quick'),
string % (t, q))),
self._template_not_case_sensitive)
class TestGetLanguageLinks(SiteAttributeTestCase):
"""Test :py:obj:`textlib.getLanguageLinks` function."""
sites = {
'enwp': {
'family': 'wikipedia',
'code': 'en',
},
'dewp': {
'family': 'wikipedia',
'code': 'de',
},
'commons': {
'family': 'commons',
'code': 'commons',
},
}
example_text = ('[[en:Site]] [[de:Site|Piped]] [[commons:Site]] '
'[[baden:Site]] [[fr:{{PAGENAME}}]]')
@classmethod
def setUpClass(cls):
"""Define set of valid targets for the example text."""
super().setUpClass()
cls.sites_set = {cls.enwp, cls.dewp}
def test_getLanguageLinks(self, key):
"""Test if the function returns the correct titles and sites."""
with mock.patch('pywikibot.output') as m:
lang_links = textlib.getLanguageLinks(self.example_text,
self.site)
m.assert_called_once_with(
'[getLanguageLinks] Text contains invalid interwiki link '
'[[fr:{{PAGENAME}}]].')
self.assertEqual({page.title() for page in lang_links.values()},
{'Site'})
self.assertEqual(set(lang_links), self.sites_set - {self.site})
class TestExtractSections(DefaultDrySiteTestCase):
"""Test the extract_sections function."""
def _extract_sections_tests(self, result, header, sections, footer):
"""Test extract_sections function."""
self.assertIsInstance(result, tuple)
self.assertIsInstance(result.sections, list)
self.assertEqual(result, (header, sections, footer))
self.assertEqual(result.header, header)
self.assertEqual(result.sections, sections)
self.assertEqual(result.footer, footer)
if result.sections:
for section in sections:
self.assertIsInstance(section, tuple)
self.assertLength(section, 2)
def test_no_sections_no_footer(self):
"""Test for text having no sections or footer."""
text = 'text'
result = extract_sections(text, self.site)
self._extract_sections_tests(result, text, [], '')
def test_no_sections_with_footer(self):
"""Test for text having footer but no section."""
text = 'text\n\n[[Category:A]]'
result = extract_sections(text, self.site)
self._extract_sections_tests(result, 'text\n\n', [], '[[Category:A]]')
def test_with_section_no_footer(self):
"""Test for text having sections but no footer."""
text = ('text\n\n'
'==title==\n'
'content')
result = extract_sections(text, self.site)
self._extract_sections_tests(
result, 'text\n\n', [('==title==', '\ncontent')], '')
def test_with_section_with_footer(self):
"""Test for text having sections and footer."""
text = ('text\n\n'
'==title==\n'
'content\n'
'[[Category:A]]\n')
result = extract_sections(text, self.site)
self._extract_sections_tests(
result,
'text\n\n', [('==title==', '\ncontent\n')], '[[Category:A]]\n')
def test_with_h1_and_h2_sections(self):
"""Test for text having h1 and h2 sections."""
text = ('text\n\n'
'=first level=\n'
'foo\n'
'==title==\n'
'bar')
result = extract_sections(text, self.site)
self._extract_sections_tests(
result,
'text\n\n',
[('=first level=', '\nfoo\n'), ('==title==', '\nbar')],
'')
def test_with_h4_and_h2_sections(self):
"""Test for text having h4 and h2 sections."""
text = ('text\n\n'
'====title====\n'
'==title 2==\n'
'content')
result = extract_sections(text, self.site)
self._extract_sections_tests(
result,
'text\n\n',
[('====title====', '\n'), ('==title 2==', '\ncontent')],
'')
def test_long_comment(self):
r"""Test for text having a long expanse of white space.
This is to catch certain regex issues caused by patterns like
r'(\s+)*$' (as found in older versions of extract_section).
They may not halt.
c.f.
https://www.regular-expressions.info/catastrophic.html
"""
text = '<!-- -->'
result = extract_sections(text, self.site)
self._extract_sections_tests(result, text, [], '')
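# Illustrative sketch of the pitfall referenced in test_long_comment above
# (the vulnerable pattern is an assumption about the older code, not a
# quote): nesting a quantified group inside another quantifier, as in
# r'(\s+)*$', lets a long whitespace run that is not at the end of the text
# be split in exponentially many ways during backtracking, while the
# equivalent r'\s*$' is matched in linear time. The helper name below is
# hypothetical and not part of textlib.
def _linear_footer_match(text):
    """Return the match for trailing whitespace using the safe pattern."""
    return re.search(r'\s*$', text)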
if __name__ == '__main__': # pragma: no cover
with suppress(SystemExit):
unittest.main()
|
py | 1a52421ce6cdc4cf78f7fd76e6d05b680568f4df | from typing import Callable
import contracts
contracts.CONTRACTS_ENABLED = True
@contracts.pre(lambda func, arity: arity >= 0, "Arity cannot be negative")
def curry_explicit(func: Callable, arity: int) -> Callable:
"""
    Transforms a function of several arguments into a function that takes its arguments one at a time, for example:
f(a, b, c, d) -> f(a)(b)(c)(d)
"""
def inner_fun(*args) -> Callable:
if len(args) == arity:
return func(*args)
if len(args) > arity:
raise ValueError("Quantity of args should be equal to arity")
return lambda arg: inner_fun(*args, arg) # we add one argument with each function call
return inner_fun
|
py | 1a524252daaf47a13e04373083c4c05fec815a0e | from PySide import QtCore, QtGui
from androguard.misc import save_session
from androguard.core import androconf
from androguard.gui.apkloading import ApkLoadingThread
from androguard.gui.treewindow import TreeWindow
from androguard.gui.sourcewindow import SourceWindow
from androguard.gui.helpers import class2func
import os
class CustomTabBar(QtGui.QTabBar):
'''Subclass QTabBar to implement middle-click closing of tabs'''
def mouseReleaseEvent(self, event):
if event.button() == QtCore.Qt.MidButton:
self.tabCloseRequested.emit(self.tabAt(event.pos()))
        super(CustomTabBar, self).mouseReleaseEvent(event)
class MainWindow(QtGui.QMainWindow):
'''Main window:
self.central: QTabWidget in center area
self.dock: QDockWidget in left area
self.tree: TreeWindow(QTreeWidget) in self.dock
'''
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.setupApkLoading()
self.setupFileMenu()
self.setupHelpMenu()
self.setupCentral()
self.setupEmptyTree()
self.setupDock()
self.setWindowTitle("AndroGui")
self.showStatus("AndroGui")
def showStatus(self, msg):
'''Helper function called by any window to display a message
in status bar.
'''
androconf.debug(msg)
self.statusBar().showMessage(msg)
def about(self):
'''User clicked About menu. Display a Message box.'''
QtGui.QMessageBox.about(self, "About AndroGui",
"<p><b>AndroGui</b> is basically a Gui for Androguard :)." \
"<br>So we named it AndroGui :p. </p>")
def setupApkLoading(self):
self.apkLoadingThread = ApkLoadingThread()
self.connect(self.apkLoadingThread,
QtCore.SIGNAL("loadedApk(bool)"),
self.loadedApk)
def loadedApk(self, success):
if not success:
self.showStatus("Analysis of %s failed :(" %
str(self.apkLoadingThread.apk_path))
return
self.a = self.apkLoadingThread.a
self.d = self.apkLoadingThread.d
self.x = self.apkLoadingThread.x
self.updateDockWithTree()
self.cleanCentral()
self.showStatus("Analysis of %s done!" %
str(self.apkLoadingThread.apk_path))
def openFile(self, path=None):
'''User clicked Open menu. Display a Dialog to ask which APK to open.'''
if not path:
path = QtGui.QFileDialog.getOpenFileName(self, "Open File",
'', "APK Files (*.apk);;Androguard Session (*.ag)")
path = str(path[0])
if path:
self.showStatus("Analyzing %s..." % str(path))
self.apkLoadingThread.load(path)
def saveFile(self, path=None):
        '''User clicked Save menu. Display a Dialog to ask where to save.'''
if not path:
path = QtGui.QFileDialog.getSaveFileName(self, "Save File",
'', "Androguard Session (*.ag)")
path = str(path[0])
if path:
self.showStatus("Saving %s..." % str(path))
self.saveSession(path)
def saveSession(self, path=None):
        '''Save the androguard session under the same name as the APK but with a .ag extension.'''
path = self.apkLoadingThread.session_path if not path else path
if not path:
return
if not hasattr(self, "a") or not hasattr(self, "d") or not hasattr(self, "x"):
androconf.warning("session not saved because no Dalvik elements")
return
try:
save_session([self.a, self.d, self.x], path)
        except RuntimeError as e:
androconf.error(str(e))
# http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle
androconf.error("Try increasing sys.recursionlimit")
os.remove(path)
androconf.warning("Session not saved")
def quit(self):
'''Clicked in File menu to exit or CTRL+Q to close main window'''
self.saveSession()
QtGui.qApp.quit()
def closeEvent(self, event):
'''Clicked [x] to close main window'''
self.saveSession()
event.accept()
def setupEmptyTree(self):
'''Setup empty Tree at startup. '''
if hasattr(self, "tree"):
del self.tree
self.tree = QtGui.QTreeWidget(self)
self.tree.header().close()
def setupDock(self):
'''Setup empty Dock at startup. '''
self.dock = QtGui.QDockWidget("Classes", self)
self.dock.setWidget(self.tree)
self.dock.setFeatures(QtGui.QDockWidget.NoDockWidgetFeatures)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dock)
def setupCentral(self):
'''Setup empty window supporting tabs at startup. '''
self.central = QtGui.QTabWidget()
self.central.setTabBar(CustomTabBar())
self.central.setTabsClosable(True)
self.central.tabCloseRequested.connect(self.tabCloseRequestedHandler)
self.central.currentChanged.connect(self.currentTabChanged)
self.setCentralWidget(self.central)
def tabCloseRequestedHandler(self, index):
self.central.removeTab(index)
def currentTabChanged(self, index):
androconf.debug("curentTabChanged -> %d" % index)
if index == -1:
return # all tab closed
def cleanCentral(self):
#TOFIX: Removes all the pages, but does not delete them.
self.central.clear()
def setupFileMenu(self):
fileMenu = QtGui.QMenu("&File", self)
self.menuBar().addMenu(fileMenu)
fileMenu.addAction("&Open...", self.openFile, "Ctrl+O")
fileMenu.addAction("&Save...", self.saveFile, "Ctrl+S")
fileMenu.addAction("E&xit", self.quit, "Ctrl+Q")
def setupHelpMenu(self):
helpMenu = QtGui.QMenu("&Help", self)
self.menuBar().addMenu(helpMenu)
helpMenu.addAction("&About", self.about)
helpMenu.addAction("About &Qt", QtGui.qApp.aboutQt)
def updateDockWithTree(self, empty=False):
'''Update the classes tree. Called when
- a new APK has been imported
- a classe has been renamed (displayed in the tree)
'''
if not hasattr(self, "d"):
androconf.debug("updateDockWithTree failed because no dalvik initialized")
return
if hasattr(self, "tree"):
del self.tree
self.tree = TreeWindow(win=self)
self.tree.setWindowTitle("Tree model")
self.dock.setWidget(self.tree)
self.tree.fill(self.d.get_classes())
def openSourceWindow(self, path, method=""):
'''Main function to open a .java source window
           It checks whether the tab is already opened and switches to it;
           otherwise, it initializes a new window.
'''
sourcewin = self.getMeSourceWindowIfExists(path)
if not sourcewin:
sourcewin = SourceWindow(win=self, path=path)
sourcewin.reload_java_sources()
self.central.addTab(sourcewin, sourcewin.title)
self.central.setTabToolTip(self.central.indexOf(sourcewin), sourcewin.path)
if method:
sourcewin.browse_to_method(method)
self.central.setCurrentWidget(sourcewin)
def getMeSourceWindowIfExists(self, path):
'''Helper for openSourceWindow'''
for idx in range(self.central.count()):
if path == self.central.tabToolTip(idx):
androconf.debug("Tab %s already opened at: %d" % (path, idx))
return self.central.widget(idx)
return None
def doesClassExist(self, path):
arg = class2func(path)
try:
getattr(self.d, arg)
except AttributeError:
return False
return True
|
py | 1a5243fb20221a3a8753fc118ce48d808c7a942d |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import lsp_admin_group_exclude_any
import lsp_admin_group_include_any
import lsp_admin_group_include_all
class lsp_admin_group(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/show-mpls-lsp-name-debug/output/lsp/show-mpls-lsp-extensive-info/show-mpls-lsp-instances-info/lsp-instances/lsp-config-admin-groups/lsp-admin-group. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_admin_group_exclude_any','__lsp_admin_group_include_any','__lsp_admin_group_include_all',)
_yang_name = 'lsp-admin-group'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__lsp_admin_group_exclude_any = YANGDynClass(base=YANGListType("lsp_admin_group_exclude_any_group_id",lsp_admin_group_exclude_any.lsp_admin_group_exclude_any, yang_name="lsp-admin-group-exclude-any", rest_name="lsp-admin-group-exclude-any", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-exclude-any-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-exclude-any", rest_name="lsp-admin-group-exclude-any", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
self.__lsp_admin_group_include_any = YANGDynClass(base=YANGListType("lsp_admin_group_include_any_group_id",lsp_admin_group_include_any.lsp_admin_group_include_any, yang_name="lsp-admin-group-include-any", rest_name="lsp-admin-group-include-any", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-include-any-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-include-any", rest_name="lsp-admin-group-include-any", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
self.__lsp_admin_group_include_all = YANGDynClass(base=YANGListType("lsp_admin_group_include_all_group_id",lsp_admin_group_include_all.lsp_admin_group_include_all, yang_name="lsp-admin-group-include-all", rest_name="lsp-admin-group-include-all", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-include-all-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-include-all", rest_name="lsp-admin-group-include-all", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_mpls_rpc', u'show-mpls-lsp-name-debug', u'output', u'lsp', u'show-mpls-lsp-extensive-info', u'show-mpls-lsp-instances-info', u'lsp-instances', u'lsp-config-admin-groups', u'lsp-admin-group']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'show-mpls-lsp-name-debug', u'output', u'lsp', u'lsp-instances', u'lsp-config-admin-groups']
def _get_lsp_admin_group_exclude_any(self):
"""
Getter method for lsp_admin_group_exclude_any, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_name_debug/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_config_admin_groups/lsp_admin_group/lsp_admin_group_exclude_any (list)
"""
return self.__lsp_admin_group_exclude_any
def _set_lsp_admin_group_exclude_any(self, v, load=False):
"""
Setter method for lsp_admin_group_exclude_any, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_name_debug/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_config_admin_groups/lsp_admin_group/lsp_admin_group_exclude_any (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_admin_group_exclude_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_admin_group_exclude_any() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("lsp_admin_group_exclude_any_group_id",lsp_admin_group_exclude_any.lsp_admin_group_exclude_any, yang_name="lsp-admin-group-exclude-any", rest_name="lsp-admin-group-exclude-any", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-exclude-any-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-exclude-any", rest_name="lsp-admin-group-exclude-any", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_admin_group_exclude_any must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("lsp_admin_group_exclude_any_group_id",lsp_admin_group_exclude_any.lsp_admin_group_exclude_any, yang_name="lsp-admin-group-exclude-any", rest_name="lsp-admin-group-exclude-any", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-exclude-any-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-exclude-any", rest_name="lsp-admin-group-exclude-any", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)""",
})
self.__lsp_admin_group_exclude_any = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_admin_group_exclude_any(self):
self.__lsp_admin_group_exclude_any = YANGDynClass(base=YANGListType("lsp_admin_group_exclude_any_group_id",lsp_admin_group_exclude_any.lsp_admin_group_exclude_any, yang_name="lsp-admin-group-exclude-any", rest_name="lsp-admin-group-exclude-any", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-exclude-any-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-exclude-any", rest_name="lsp-admin-group-exclude-any", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
def _get_lsp_admin_group_include_any(self):
"""
Getter method for lsp_admin_group_include_any, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_name_debug/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_config_admin_groups/lsp_admin_group/lsp_admin_group_include_any (list)
"""
return self.__lsp_admin_group_include_any
def _set_lsp_admin_group_include_any(self, v, load=False):
"""
Setter method for lsp_admin_group_include_any, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_name_debug/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_config_admin_groups/lsp_admin_group/lsp_admin_group_include_any (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_admin_group_include_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_admin_group_include_any() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("lsp_admin_group_include_any_group_id",lsp_admin_group_include_any.lsp_admin_group_include_any, yang_name="lsp-admin-group-include-any", rest_name="lsp-admin-group-include-any", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-include-any-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-include-any", rest_name="lsp-admin-group-include-any", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_admin_group_include_any must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("lsp_admin_group_include_any_group_id",lsp_admin_group_include_any.lsp_admin_group_include_any, yang_name="lsp-admin-group-include-any", rest_name="lsp-admin-group-include-any", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-include-any-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-include-any", rest_name="lsp-admin-group-include-any", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)""",
})
self.__lsp_admin_group_include_any = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_admin_group_include_any(self):
self.__lsp_admin_group_include_any = YANGDynClass(base=YANGListType("lsp_admin_group_include_any_group_id",lsp_admin_group_include_any.lsp_admin_group_include_any, yang_name="lsp-admin-group-include-any", rest_name="lsp-admin-group-include-any", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-include-any-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-include-any", rest_name="lsp-admin-group-include-any", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
def _get_lsp_admin_group_include_all(self):
"""
Getter method for lsp_admin_group_include_all, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_name_debug/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_config_admin_groups/lsp_admin_group/lsp_admin_group_include_all (list)
"""
return self.__lsp_admin_group_include_all
def _set_lsp_admin_group_include_all(self, v, load=False):
"""
Setter method for lsp_admin_group_include_all, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_name_debug/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_instances_info/lsp_instances/lsp_config_admin_groups/lsp_admin_group/lsp_admin_group_include_all (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_admin_group_include_all is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_admin_group_include_all() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("lsp_admin_group_include_all_group_id",lsp_admin_group_include_all.lsp_admin_group_include_all, yang_name="lsp-admin-group-include-all", rest_name="lsp-admin-group-include-all", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-include-all-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-include-all", rest_name="lsp-admin-group-include-all", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_admin_group_include_all must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("lsp_admin_group_include_all_group_id",lsp_admin_group_include_all.lsp_admin_group_include_all, yang_name="lsp-admin-group-include-all", rest_name="lsp-admin-group-include-all", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-include-all-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-include-all", rest_name="lsp-admin-group-include-all", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)""",
})
self.__lsp_admin_group_include_all = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_admin_group_include_all(self):
self.__lsp_admin_group_include_all = YANGDynClass(base=YANGListType("lsp_admin_group_include_all_group_id",lsp_admin_group_include_all.lsp_admin_group_include_all, yang_name="lsp-admin-group-include-all", rest_name="lsp-admin-group-include-all", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-include-all-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-include-all", rest_name="lsp-admin-group-include-all", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
lsp_admin_group_exclude_any = __builtin__.property(_get_lsp_admin_group_exclude_any, _set_lsp_admin_group_exclude_any)
lsp_admin_group_include_any = __builtin__.property(_get_lsp_admin_group_include_any, _set_lsp_admin_group_include_any)
lsp_admin_group_include_all = __builtin__.property(_get_lsp_admin_group_include_all, _set_lsp_admin_group_include_all)
_pyangbind_elements = {'lsp_admin_group_exclude_any': lsp_admin_group_exclude_any, 'lsp_admin_group_include_any': lsp_admin_group_include_any, 'lsp_admin_group_include_all': lsp_admin_group_include_all, }
|
py | 1a524434dd150b55c38e1d8f0494d92e6cd75dcf | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Gromacs(CMakePackage):
"""GROMACS (GROningen MAchine for Chemical Simulations) is a molecular
dynamics package primarily designed for simulations of proteins, lipids
and nucleic acids. It was originally developed in the Biophysical
Chemistry department of University of Groningen, and is now maintained
by contributors in universities and research centers across the world.
GROMACS is one of the fastest and most popular software packages
available and can run on CPUs as well as GPUs. It is free, open source
released under the GNU General Public License. Starting from version 4.6,
GROMACS is released under the GNU Lesser General Public License.
"""
homepage = 'http://www.gromacs.org'
url = 'http://ftp.gromacs.org/gromacs/gromacs-5.1.2.tar.gz'
git = 'https://github.com/gromacs/gromacs.git'
maintainers = ['junghans', 'marvinbernhardt']
version('develop', branch='master')
version('2019.4', sha256='ba4366eedfc8a1dbf6bddcef190be8cd75de53691133f305a7f9c296e5ca1867')
version('2019.3', sha256='4211a598bf3b7aca2b14ad991448947da9032566f13239b1a05a2d4824357573')
version('2019.2', sha256='bcbf5cc071926bc67baa5be6fb04f0986a2b107e1573e15fadcb7d7fc4fb9f7e')
version('2019.1', sha256='b2c37ed2fcd0e64c4efcabdc8ee581143986527192e6e647a197c76d9c4583ec')
version('2019', sha256='c5b281a5f0b5b4eeb1f4c7d4dc72f96985b566561ca28acc9c7c16f6ee110d0b')
version('2018.8', sha256='3776923415df4bc78869d7f387c834141fdcda930b2e75be979dc59ecfa6ebec')
version('2018.5', sha256='32261df6f7ec4149fc0508f9af416953d056e281590359838c1ed6644ba097b8')
version('2018.4', sha256='6f2ee458c730994a8549d6b4f601ecfc9432731462f8bd4ffa35d330d9aaa891')
version('2018.3', sha256='4423a49224972969c52af7b1f151579cea6ab52148d8d7cbae28c183520aa291')
version('2018.2', sha256='4bdde8120c510b6543afb4b18f82551fddb11851f7edbd814aa24022c5d37857')
version('2018.1', sha256='4d3533340499323fece83b4a2d4251fa856376f2426c541e00b8e6b4c0d705cd')
version('2018', sha256='deb5d0b749a52a0c6083367b5f50a99e08003208d81954fb49e7009e1b1fd0e9')
version('2016.6', sha256='bac0117d2cad21f9b94fe5b854fb9ae7435b098a6da4e732ee745f18e52473d7')
version('2016.5', sha256='57db26c6d9af84710a1e0c47a1f5bf63a22641456448dcd2eeb556ebd14e0b7c')
version('2016.4', sha256='4be9d3bfda0bdf3b5c53041e0b8344f7d22b75128759d9bfa9442fe65c289264')
version('2016.3', sha256='7bf00e74a9d38b7cef9356141d20e4ba9387289cbbfd4d11be479ef932d77d27')
version('5.1.5', sha256='c25266abf07690ecad16ed3996899b1d489cbb1ef733a1befb3b5c75c91a703e')
version('5.1.4', sha256='0f3793d8f1f0be747cf9ebb0b588fb2b2b5dc5acc32c3046a7bee2d2c03437bc')
version('5.1.2', sha256='39d6f1d7ae8ba38cea6089da40676bfa4049a49903d21551abc030992a58f304')
version('4.5.5', sha256='e0605e4810b0d552a8761fef5540c545beeaf85893f4a6e21df9905a33f871ba')
variant('mpi', default=True, description='Activate MPI support')
variant('shared', default=True,
description='Enables the build of shared libraries')
variant(
'double', default=False,
description='Produces a double precision version of the executables')
variant('plumed', default=False, description='Enable PLUMED support')
variant('cuda', default=False, description='Enable CUDA support')
variant('build_type', default='RelWithDebInfo',
description='The build type to build',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel',
'Reference', 'RelWithAssert', 'Profile'))
variant('simd', default='auto',
description='The SIMD instruction set to use',
values=('auto', 'none', 'SSE2', 'SSE4.1', 'AVX_128_FMA', 'AVX_256',
'AVX2_128', 'AVX2_256', 'AVX_512', 'AVX_512_KNL',
'IBM_QPX', 'Sparc64_HPC_ACE', 'IBM_VMX', 'IBM_VSX',
'ARM_NEON', 'ARM_NEON_ASIMD'))
variant('rdtscp', default=True, description='Enable RDTSCP instruction usage')
variant('mdrun_only', default=False,
description='Enables the build of a cut-down version'
' of libgromacs and/or the mdrun program')
variant('openmp', default=True, description='Enables OpenMP at configure time')
variant('double_precision', default=False, description='Enables a double-precision configuration')
depends_on('mpi', when='+mpi')
depends_on('plumed+mpi', when='+plumed+mpi')
depends_on('plumed~mpi', when='+plumed~mpi')
depends_on('fftw')
depends_on('[email protected]:3.99.99', type='build')
depends_on('[email protected]:3.99.99', type='build', when='@2018:')
depends_on('cuda', when='+cuda')
    patch('gmxDetectCpu-cmake-3.14.patch', when='@2018:2019.3^cmake@3.14:')
    patch('gmxDetectSimd-cmake-3.14.patch', when='@:2017.99^cmake@3.14:')
def patch(self):
if '+plumed' in self.spec:
self.spec['plumed'].package.apply_patch(self)
def cmake_args(self):
options = []
if '+mpi' in self.spec:
options.append('-DGMX_MPI:BOOL=ON')
if '+double' in self.spec:
options.append('-DGMX_DOUBLE:BOOL=ON')
if '~shared' in self.spec:
options.append('-DBUILD_SHARED_LIBS:BOOL=OFF')
if '+cuda' in self.spec:
options.append('-DGMX_GPU:BOOL=ON')
options.append('-DCUDA_TOOLKIT_ROOT_DIR:STRING=' +
self.spec['cuda'].prefix)
else:
options.append('-DGMX_GPU:BOOL=OFF')
simd_value = self.spec.variants['simd'].value
if simd_value == 'auto':
pass
elif simd_value == 'none':
options.append('-DGMX_SIMD:STRING=None')
else:
options.append('-DGMX_SIMD:STRING=' + simd_value)
if '-rdtscp' in self.spec:
options.append('-DGMX_USE_RDTSCP:BOOL=OFF')
else:
options.append('-DGMX_USE_RDTSCP:BOOL=ON')
if '+mdrun_only' in self.spec:
options.append('-DGMX_BUILD_MDRUN_ONLY:BOOL=ON')
else:
options.append('-DGMX_BUILD_MDRUN_ONLY:BOOL=OFF')
if '~openmp' in self.spec:
options.append('-DGMX_OPENMP:BOOL=OFF')
else:
options.append('-DGMX_OPENMP:BOOL=ON')
if '+double_precision' in self.spec:
options.append('-DGMX_RELAXED_DOUBLE_PRECISION:BOOL=ON')
else:
options.append('-DGMX_RELAXED_DOUBLE_PRECISION:BOOL=OFF')
return options
|
py | 1a52449b84e0df4cab420882a765e6969f46ff2e | from typing import Optional, Union, Tuple
from torch_geometric.typing import OptTensor, Adj
import torch
from torch import Tensor
import torch.nn.functional as F
from torch.nn import Parameter as Param
from torch.nn import Parameter
from torch_scatter import scatter
from torch_sparse import SparseTensor, matmul, masked_select_nnz
from torch_geometric.nn.conv import MessagePassing
import math
def glorot(tensor):
if tensor is not None:
stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))
tensor.data.uniform_(-stdv, stdv)
def zeros(tensor):
if tensor is not None:
tensor.data.fill_(0)
@torch.jit._overload
def masked_edge_index(edge_index, edge_mask):
# type: (Tensor, Tensor) -> Tensor
pass
@torch.jit._overload
def masked_edge_index(edge_index, edge_mask):
# type: (SparseTensor, Tensor) -> SparseTensor
pass
def masked_edge_index(edge_index, edge_mask):
if isinstance(edge_index, Tensor):
return edge_index[:, edge_mask]
else:
return masked_select_nnz(edge_index, edge_mask, layout='coo')
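# Minimal usage sketch (illustrative only; the helper name is hypothetical,
# and it relies on the torch and torch_sparse imports above): a dense
# ``edge_index`` is filtered by selecting columns, while a ``SparseTensor``
# keeps only the masked non-zero entries.
def _masked_edge_index_demo():
    edge_index = torch.tensor([[0, 1, 2], [1, 2, 0]])
    edge_type = torch.tensor([0, 1, 0])
    dense = masked_edge_index(edge_index, edge_type == 0)
    adj = SparseTensor.from_edge_index(edge_index, edge_type, sparse_sizes=(3, 3))
    # The relation type is stored as the value of the SparseTensor, so the
    # mask is rebuilt from ``storage.value()`` (entries may be reordered).
    sparse = masked_edge_index(adj, adj.storage.value() == 0)
    return dense, sparse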
class RGCNConv(MessagePassing):
r"""The relational graph convolutional operator from the `"Modeling
Relational Data with Graph Convolutional Networks"
<https://arxiv.org/abs/1703.06103>`_ paper
.. math::
\mathbf{x}^{\prime}_i = \mathbf{\Theta}_{\textrm{root}} \cdot
\mathbf{x}_i + \sum_{r \in \mathcal{R}} \sum_{j \in \mathcal{N}_r(i)}
\frac{1}{|\mathcal{N}_r(i)|} \mathbf{\Theta}_r \cdot \mathbf{x}_j,
where :math:`\mathcal{R}` denotes the set of relations, *i.e.* edge types.
Edge type needs to be a one-dimensional :obj:`torch.long` tensor which
stores a relation identifier
:math:`\in \{ 0, \ldots, |\mathcal{R}| - 1\}` for each edge.
.. note::
This implementation is as memory-efficient as possible by iterating
over each individual relation type.
Therefore, it may result in low GPU utilization in case the graph has a
large number of relations.
As an alternative approach, :class:`FastRGCNConv` does not iterate over
each individual type, but may consume a large amount of memory to
compensate.
We advise to check out both implementations to see which one fits your
needs.
Args:
in_channels (int or tuple): Size of each input sample. A tuple
corresponds to the sizes of source and target dimensionalities.
In case no input features are given, this argument should
correspond to the number of nodes in your graph.
out_channels (int): Size of each output sample.
num_relations (int): Number of relations.
num_bases (int, optional): If set to not :obj:`None`, this layer will
use the basis-decomposition regularization scheme where
:obj:`num_bases` denotes the number of bases to use.
(default: :obj:`None`)
num_blocks (int, optional): If set to not :obj:`None`, this layer will
use the block-diagonal-decomposition regularization scheme where
:obj:`num_blocks` denotes the number of blocks to use.
(default: :obj:`None`)
aggr (string, optional): The aggregation scheme to use
(:obj:`"add"`, :obj:`"mean"`, :obj:`"max"`).
(default: :obj:`"mean"`)
root_weight (bool, optional): If set to :obj:`False`, the layer will
not add transformed root node features to the output.
(default: :obj:`True`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
"""
def __init__(self, in_channels: Union[int, Tuple[int, int]],
out_channels: int,
num_relations: int,
num_bases: Optional[int] = None,
num_blocks: Optional[int] = None,
aggr: str = 'mean',
root_weight: bool = True,
bias: bool = True, **kwargs): # yapf: disable
super(RGCNConv, self).__init__(aggr=aggr, node_dim=0, **kwargs)
if num_bases is not None and num_blocks is not None:
raise ValueError('Can not apply both basis-decomposition and '
'block-diagonal-decomposition at the same time.')
self.in_channels = in_channels
self.out_channels = out_channels
self.num_relations = num_relations
self.num_bases = num_bases
self.num_blocks = num_blocks
if isinstance(in_channels, int):
in_channels = (in_channels, in_channels)
self.in_channels_l = in_channels[0]
if num_bases is not None:
self.weight = Parameter(
torch.Tensor(num_bases, in_channels[0], out_channels))
self.comp = Parameter(torch.Tensor(num_relations, num_bases))
elif num_blocks is not None:
assert (in_channels[0] % num_blocks == 0
and out_channels % num_blocks == 0)
self.weight = Parameter(
torch.Tensor(num_relations, num_blocks,
in_channels[0] // num_blocks,
out_channels // num_blocks))
self.register_parameter('comp', None)
else:
self.weight = Parameter(
torch.Tensor(num_relations, in_channels[0], out_channels))
self.register_parameter('comp', None)
if root_weight:
self.root = Param(torch.Tensor(in_channels[1], out_channels))
else:
self.register_parameter('root', None)
if bias:
self.bias = Param(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
glorot(self.weight)
glorot(self.comp)
glorot(self.root)
zeros(self.bias)
def forward(self, x: Union[OptTensor, Tuple[OptTensor, Tensor]],
edge_index: Adj, edge_type: OptTensor = None):
r"""
Args:
x: The input node features. Can be either a :obj:`[num_nodes,
in_channels]` node feature matrix, or an optional
one-dimensional node index tensor (in which case input features
are treated as trainable node embeddings).
Furthermore, :obj:`x` can be of type :obj:`tuple` denoting
source and destination node features.
edge_type: The one-dimensional relation type/index for each edge in
:obj:`edge_index`.
                Should only be :obj:`None` in case :obj:`edge_index` is of type
:class:`torch_sparse.tensor.SparseTensor`.
(default: :obj:`None`)
"""
# Convert input features to a pair of node features or node indices.
x_l: OptTensor = None
if isinstance(x, tuple):
x_l = x[0]
else:
x_l = x
if x_l is None:
x_l = torch.arange(self.in_channels_l, device=self.weight.device)
x_r: Tensor = x_l
if isinstance(x, tuple):
x_r = x[1]
size = (x_l.size(0), x_r.size(0))
if isinstance(edge_index, SparseTensor):
edge_type = edge_index.storage.value()
assert edge_type is not None
# propagate_type: (x: Tensor)
out = torch.zeros(x_r.size(0), self.out_channels, device=x_r.device)
weight = self.weight
if self.num_bases is not None: # Basis-decomposition =================
weight = (self.comp @ weight.view(self.num_bases, -1)).view(
self.num_relations, self.in_channels_l, self.out_channels)
if self.num_blocks is not None: # Block-diagonal-decomposition =====
            if x_l.dtype == torch.long:
raise ValueError('Block-diagonal decomposition not supported '
'for non-continuous input features.')
for i in range(self.num_relations):
tmp = masked_edge_index(edge_index, edge_type == i)
h = self.propagate(tmp, x=x_l, size=size)
h = h.view(-1, weight.size(1), weight.size(2))
h = torch.einsum('abc,bcd->abd', h, weight[i])
out += h.contiguous().view(-1, self.out_channels)
else: # No regularization/Basis-decomposition ========================
for i in range(self.num_relations):
tmp = masked_edge_index(edge_index, edge_type == i)
if x_l.dtype == torch.long:
out += self.propagate(tmp, x=weight[i, x_l], size=size)
else:
h = self.propagate(tmp, x=x_l, size=size)
out = out + (h @ weight[i])
root = self.root
if root is not None:
out += root[x_r] if x_r.dtype == torch.long else x_r @ root
if self.bias is not None:
out += self.bias
return out
def message(self, x_j: Tensor) -> Tensor:
return x_j
def message_and_aggregate(self, adj_t: SparseTensor, x: Tensor) -> Tensor:
adj_t = adj_t.set_value(None, layout=None)
return matmul(adj_t, x, reduce=self.aggr)
def __repr__(self):
return '{}({}, {}, num_relations={})'.format(self.__class__.__name__,
self.in_channels,
self.out_channels,
self.num_relations)
class FastRGCNConv(RGCNConv):
r"""See :class:`RGCNConv`."""
def forward(self, x: Union[OptTensor, Tuple[OptTensor, Tensor]],
edge_index: Adj, edge_type: OptTensor = None):
""""""
self.fuse = False
assert self.aggr in ['add', 'sum', 'mean']
# Convert input features to a pair of node features or node indices.
x_l: OptTensor = None
if isinstance(x, tuple):
x_l = x[0]
else:
x_l = x
if x_l is None:
x_l = torch.arange(self.in_channels_l, device=self.weight.device)
x_r: Tensor = x_l
if isinstance(x, tuple):
x_r = x[1]
size = (x_l.size(0), x_r.size(0))
# propagate_type: (x: Tensor, edge_type: OptTensor)
out = self.propagate(edge_index, x=x_l, edge_type=edge_type, size=size)
root = self.root
if root is not None:
out += root[x_r] if x_r.dtype == torch.long else x_r @ root
if self.bias is not None:
out += self.bias
return out
def message(self, x_j: Tensor, edge_type: Tensor, index: Tensor) -> Tensor:
weight = self.weight
if self.num_bases is not None: # Basis-decomposition =================
weight = (self.comp @ weight.view(self.num_bases, -1)).view(
self.num_relations, self.in_channels_l, self.out_channels)
if self.num_blocks is not None: # Block-diagonal-decomposition =======
if x_j.dtype == torch.long:
raise ValueError('Block-diagonal decomposition not supported '
'for non-continuous input features.')
weight = weight[edge_type].view(-1, weight.size(2), weight.size(3))
x_j = x_j.view(-1, 1, weight.size(1))
return torch.bmm(x_j, weight).view(-1, self.out_channels)
else: # No regularization/Basis-decomposition ========================
if x_j.dtype == torch.long:
                weight_index = edge_type * weight.size(1) + x_j  # look up source-node embeddings, matching RGCNConv
return weight.view(-1, self.out_channels)[weight_index]
return torch.bmm(x_j.unsqueeze(-2), weight[edge_type]).squeeze(-2)
def aggregate(self, inputs: Tensor, edge_type: Tensor, index: Tensor,
dim_size: Optional[int] = None) -> Tensor:
# Compute normalization in separation for each `edge_type`.
if self.aggr == 'mean':
norm = F.one_hot(edge_type, self.num_relations).to(torch.float)
norm = scatter(norm, index, dim=0, dim_size=dim_size)[index]
norm = torch.gather(norm, 1, edge_type.view(-1, 1))
norm = 1. / norm.clamp_(1.)
inputs = norm * inputs
return scatter(inputs, index, dim=self.node_dim, dim_size=dim_size)
|
py | 1a5244af8747e90326c6854d93af4558f5b7e1ed | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Module containing Net information storage."""
#from typing import Tuple
import pandas as pd
from .. import logger
class QNet():
"""Use DataFrame to hold Net Information about the connected pins of a
design.
    There is one unique net_id for each connected pin.
"""
def __init__(self):
"""Hold the net information of all the USED pins within a design."""
self.column_names = ['net_id', 'component_id', 'pin_name']
self._net_info = pd.DataFrame(columns=self.column_names)
self._qnet_latest_assigned_id = 0
self.logger = logger # type: logging.Logger
def _get_new_net_id(self) -> int:
"""Provide uniqe new qnet_id.
Returns:
int: ID to use for storing a new net within _net_info.
"""
self._qnet_latest_assigned_id += 1
return self._qnet_latest_assigned_id
@property
def qnet_latest_assigned_id(self) -> int:
"""Return unique number for each net in table.
Returns:
            int: Lets the user of the design class know the latest id added to _net_info.
"""
return self._qnet_latest_assigned_id
@property
def net_info(self) -> pd.DataFrame:
"""Provide table of all nets within the design.
Returns:
pd.DataFrame: Table of the net of pins within design.
"""
return self._net_info
def add_pins_to_table(self, comp1_id: int, pin1_name: str, comp2_id: int,
pin2_name: str) -> int:
"""Add two entries into the _net_info table. If either component/pin is
already in net_info, the connection will NOT be added to the net_info.
Arguments:
comp1_id (int): Name of component 1.
pin1_name (str): Corresponding pin name for component1.
comp2_id (int): Name of component 2.
            pin2_name (str): Corresponding pin name for component2.
Returns:
            int: 0 if the pins were not added to the table, otherwise the new net_id.
"""
        net_id = 0  # Zero means false: the pins were not added to _net_info
if not isinstance(comp1_id, int):
self.logger.warning(
                f'Expected an int, but have {comp1_id}. The pins were not entered into the net_info table.'
)
return net_id
if not isinstance(comp2_id, int):
self.logger.warning(
                f'Expected an int, but have {comp2_id}. The pins were not entered into the net_info table.'
)
return net_id
if not isinstance(pin1_name, str):
self.logger.warning(
                f'Expected a string, but have {pin1_name}. The pins were not entered into the net_info table.'
)
return net_id
if not isinstance(pin2_name, str):
self.logger.warning(
                f'Expected a string, but have {pin2_name}. The pins were not entered into the net_info table.'
)
return net_id
        # Confirm the component-pin combination is NOT in _net_info before adding them.
for (netID, component_id,
pin_name) in self._net_info.itertuples(index=False):
if ((component_id == comp1_id) and (pin_name == pin1_name)):
self.logger.warning(
f'Component: {comp1_id} and pin: {pin1_name} are already in net_info with net_id {netID}'
)
return net_id
if ((component_id == comp2_id) and (pin_name == pin2_name)):
self.logger.warning(
f'Component: {comp2_id} and pin: {pin2_name} are already in net_info with net_id {netID}'
)
return net_id
net_id = self._get_new_net_id()
entry1 = [net_id, comp1_id, pin1_name]
entry2 = [net_id, comp2_id, pin2_name]
temp_df = pd.DataFrame([entry1, entry2], columns=self.column_names)
self._net_info = self._net_info.append(temp_df, ignore_index=True)
# print(self._net_info)
return net_id
def delete_net_id(self, net_id_to_remove: int):
"""Removes the two entries with net_id_to_remove. If id is in
_net_info, the entry will be removed.
Arguments:
net_id_to_remove (int): The id to remove.
"""
self._net_info.drop(
self._net_info.index[self._net_info['net_id'] == net_id_to_remove],
inplace=True)
return
def delete_all_pins_for_component(self, component_id_to_remove: int) -> set:
"""Delete all the pins for a given component id.
Args:
component_id_to_remove (int): Component ID to remove
Returns:
set: All deleted ids
"""
all_net_id_deleted = set()
for (netID, component_id,
pin_name) in self._net_info.itertuples(index=False):
if (component_id == component_id_to_remove):
all_net_id_deleted.add(netID)
self.delete_net_id(netID)
return all_net_id_deleted
def get_components_and_pins_for_netid(
self, net_id_search: int) -> pd.core.frame.DataFrame:
"""Search with a net_id to get component id and pin name.
Arguments:
net_id_search (int): Unique net id which connects two pins within a design.
Returns:
pandas.DataFrame: Two rows of the net_info which have the same net_id_search.
"""
df_subset_based_on_net_id = self._net_info[(
self._net_info['net_id'] == net_id_search)]
return df_subset_based_on_net_id
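# A minimal usage sketch (illustration only; the component ids and pin names
# below are made-up values, not part of this module):
#
#   net = QNet()
#   net_id = net.add_pins_to_table(1, 'pin_a', 2, 'pin_b')    # returns a new net id
#   net.get_components_and_pins_for_netid(net_id)             # the two rows just added
#   net.delete_all_pins_for_component(1)                      # removes that net again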
|
py | 1a5245aedfc1b9490a3b54bc50ec42cdb36ba15a | from discord.ext import commands
import discord, io
class Core(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.update = {
"allowUpdate": True,
"url": "https://raw.github.com/Akumatic/Akuma-Matata/master/extensions/core.py",
"private": False
}
def detectSetGame(self):
return f" | {self.bot.cfg['game']}" if self.bot.cfg["game"] != "" else ""
#Listener
@commands.Cog.listener()
async def on_ready(self):
print("Bot is running!")
game = f"{self.bot.cfg['prefix']}help{self.detectSetGame()}"
await self.bot.change_presence(status=discord.Status.online, activity=discord.Game(name=game))
@commands.Cog.listener()
async def on_guild_join(self, guild):
self.bot.serverCfg[str(guild.id)] = {}
self.bot.writeJSON("server.json", self.bot.serverCfg)
@commands.Cog.listener()
async def on_guild_remove(self, guild):
del self.bot.serverCfg[str(guild.id)]
self.bot.writeJSON("server.json", self.bot.serverCfg)
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
e = discord.Embed(color=discord.Color.red(), title="Error")
if isinstance(error, commands.CommandNotFound):
#e.add_field(name="Command Not Found", value="The command you tried to use does not exist.")
return #await ctx.author.send(embed=e)
if isinstance(error, commands.NotOwner):
e.add_field(name="Not The Owner", value="Only the owner of this bot can use this command.")
return await ctx.send(embed=e)
if isinstance(error, commands.NoPrivateMessage):
e.add_field(name="No Direct Message", value="This command is only usable in a server.")
return await ctx.send(embed=e)
if isinstance(error, commands.MissingPermissions):
e.add_field(name="Missing Permissions", value="You don't have the permissions to use this command.")
return await ctx.send(embed=e)
e.add_field(name="Source", value=ctx.message.channel, inline=False)
e.add_field(name="Trigger", value=ctx.message.content, inline=False)
e.add_field(name="Error", value=f"{type(error).__name__} ({error})", inline=False)
await ctx.send(embed=e)
#Commands
@commands.command()
@commands.is_owner()
async def stop(self, ctx):
ext = self.bot.extensions
while len(ext) > 0:
self.bot.unload_extension(list(ext.keys())[0])
await self.bot.close()
@commands.command()
@commands.is_owner()
async def setGame(self, ctx, *, msg : str = None):
self.bot.cfg["game"] = "" if msg == None else msg
game = f"{self.bot.cfg['prefix']}help{self.detectSetGame()}"
await self.bot.change_presence(status=discord.Status.online, activity=discord.Game(name=game))
@commands.command(hidden=True)
@commands.is_owner()
async def load(self, ctx, ext : str = None, json : bool = False):
"""Loads a new python file from \"extension\" folder.
First argument is the name of python file without .py extension.
(Optional) If second argument is True, it will be autoloaded"""
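        # Usage sketch (extension name and prefix are illustrative):
        #   <prefix>load music True  -> loads extensions/music.py and adds it to autoload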
e = discord.Embed(title="Loading Extension")
if ext == None:
e.color = discord.Color.red()
e.add_field(name="No extension specified", value="Please specify the name of the extension.")
return await ctx.send(embed=e)
try:
self.bot.load_extension("extensions." + ext)
e.color = discord.Color.green()
e.add_field(name="Extension loaded", value=f"`{ext}` successfully loaded.", inline=False)
if json and ext not in self.bot.cfg["extensions"]:
self.bot.cfg["extensions"].append(ext)
self.bot.writeJSON("settings.json", self.bot.cfg)
e.add_field(name="Autoload", value=f"`{ext}` was added to autostart extensions.", inline=False)
except Exception as ex:
e.color = discord.Color.red()
e.add_field(name=f"Failed to load extension `{ext}`", value=f"{type(ex).__name__} ({ex})")
await ctx.send(embed=e)
@commands.command(hidden=True)
@commands.is_owner()
async def unload(self, ctx, ext : str = None, json : bool = False):
"""Unloads an extension.
First argument is the name of the extension.
(Optional) If second argument is True, it will be removed from autoload"""
e = discord.Embed(title="Unloading Extension")
if ext == None:
e.color = discord.Color.red()
e.add_field(name="No extension specified", value="Please specify the name of the extension.")
return await ctx.send(embed=e)
if ("extensions." + ext) in self.bot.extensions:
self.bot.unload_extension("extensions." + ext)
e.color = discord.Color.green()
e.add_field(name="Extension unloaded", value=f"`{ext}` successfully unloaded.", inline=False)
if json and ext in self.bot.cfg["extensions"]:
self.bot.cfg["extensions"].remove(ext)
self.bot.writeJSON("settings.json", self.bot.cfg)
e.add_field(name="Autoload", value=f"`{ext}` was removed from autostart extensions.", inline=False)
else:
e.color = discord.Color.red()
e.add_field(name=f"Failed to unload `{ext}`", value=f"`{ext}` not loaded")
await ctx.send(embed=e)
@commands.command(hidden=True)
@commands.is_owner()
async def reload(self, ctx, ext : str = None):
"""Reloads an extension"""
e = discord.Embed(title="Reloading Extension: Unloading")
if ext == None:
e.color = discord.Color.red()
e.add_field(name="No extension specified", value="Please specify the name of the extension.")
return await ctx.send(embed=e)
if ("extensions." + ext) in self.bot.extensions:
self.bot.unload_extension("extensions." + ext)
e.color = discord.Color.green()
e.add_field(name="Extension unloaded", value=f"`{ext}` successfully unloaded.", inline=False)
await ctx.send(embed=e)
e = discord.Embed(title="Reloading Extension: Loading")
try:
self.bot.load_extension("extensions." + ext)
e.color = discord.Color.green()
e.add_field(name="Extension loaded", value=f"`{ext}` successfully loaded.", inline=False)
except Exception as ex:
e.color = discord.Color.red()
e.add_field(name=f"Failed to load extension `{ext}`", value=f"{type(ex).__name__} ({ex})")
else:
e.color = discord.Color.red()
e.add_field(name=f"Failed to unload `{ext}`", value=f"`{ext}` not loaded")
await ctx.send(embed=e)
@commands.command(hidden=True)
@commands.is_owner()
async def printExt(self, ctx):
"""Prints out every loaded extension"""
string = []
temp = None
for ext in self.bot.extensions:
temp = ext.split(".")
string.append(temp[-1] if len(temp) > 1 else temp[0])
e = discord.Embed(color=discord.Color.blue())
e.add_field(name="Loaded extensions", value=', '.join(string))
await ctx.send(embed=e)
def setup(bot):
bot.add_cog(Core(bot)) |
py | 1a5245d97545c0d879973a3ea6bc43ca86c70117 | # encoding: utf-8
from opendatatools.common import RestAgent
from opendatatools.common import date_convert, remove_non_numerical
from bs4 import BeautifulSoup
import datetime
import json
import pandas as pd
import io
from opendatatools.futures.futures_agent import _concat_df
import zipfile
def time_map(x):
if x == '':
return ''
else:
return datetime.datetime.strptime(x, '%Y%m%d').strftime('%Y-%m-%d')
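# plan_map below parses a Chinese dividend plan string such as '10派5元'
# ("5 yuan paid per 10 shares") into a per-share cash dividend (here 5 / 10 = 0.5);
# plans without '派' yield 0.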
def plan_map(x):
if '派' not in x:
return 0
else:
return '%.3f' % (float(x.split('派')[-1].split('元')[0])/10)
class SHExAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
headers = {
"Accept": '*/*',
'Referer': 'http://www.sse.com.cn/market/sseindex/indexlist/',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
}
self.add_headers(headers)
def get_index_list(self):
url = 'http://query.sse.com.cn/commonSoaQuery.do'
data = {
'sqlId': 'DB_SZZSLB_ZSLB',
}
response = self.do_request(url, data)
rsp = json.loads(response)
if 'pageHelp' in rsp:
data = rsp['pageHelp']['data']
return pd.DataFrame(data)
else:
return None
def get_index_component(self, index):
url = 'http://query.sse.com.cn/commonSoaQuery.do'
data = {
'sqlId': 'DB_SZZSLB_CFGLB',
'indexCode' : index,
}
response = self.do_request(url, data)
rsp = json.loads(response)
if 'pageHelp' in rsp:
data = rsp['pageHelp']['data']
return pd.DataFrame(data)
else:
return None
def get_dividend(self, code):
url = 'http://query.sse.com.cn/commonQuery.do'
data = {
'sqlId' : 'COMMON_SSE_GP_SJTJ_FHSG_AGFH_L_NEW',
'security_code_a' : code,
}
response = self.do_request(url, data)
rsp = json.loads(response)
if 'result' in rsp:
data = rsp['result']
return pd.DataFrame(data)
else:
return None
def get_rzrq_info(self, date):
date2 = date_convert(date, '%Y-%m-%d', '%Y%m%d')
url = 'http://www.sse.com.cn/market/dealingdata/overview/margin/a/rzrqjygk%s.xls' % (date2)
response = self.do_request(url, None, method='GET', type='binary')
if response is not None:
excel = pd.ExcelFile(io.BytesIO(response))
df_total = excel.parse('汇总信息').dropna()
df_detail = excel.parse('明细信息').dropna()
df_total['date'] = date
df_detail['date'] = date
return df_total, df_detail
else:
return None, None
def get_pledge_info(self, date):
date2 = date_convert(date, '%Y-%m-%d', '%Y%m%d')
url = 'http://query.sse.com.cn/exportExcel/exportStockPledgeExcle.do?tradeDate=%s' % (date2)
response = self.do_request(url, None, method='GET', type='binary')
if response is not None:
excel = pd.ExcelFile(io.BytesIO(response))
df_total = excel.parse('交易金额汇总').dropna()
df_detail = excel.parse('交易数量明细').dropna()
df_total['date'] = date
df_detail['date'] = date
return df_total, df_detail
else:
return None, None
class SZExAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
def get_index_list(self):
#url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
url = 'http://www.szse.cn/api/report/ShowReport'
data = {
'SHOWTYPE' : 'xls',
'CATALOGID' : '1812',
}
response = self.do_request(url, data, method='GET', type='binary')
df = pd.read_excel(io.BytesIO(response))
return df
def get_index_component(self, index):
#url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
url = 'http://www.szse.cn/api/report/ShowReport'
data = {
'SHOWTYPE': 'xls',
'CATALOGID': '1747',
'ZSDM' : index
}
response = self.do_request(url, data, method='GET', type='binary')
if response is not None:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
def get_rzrq_info(self, date):
df_total = self._get_rzrq_total(date)
df_detail = self._get_rzrq_detail(date)
if df_total is not None:
df_total['date'] = date
if df_detail is not None:
df_detail['date'] = date
return df_total, df_detail
def _get_rzrq_total(self, date):
#url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
url = 'http://www.szse.cn/api/report/ShowReport'
data = {
'SHOWTYPE': 'xls',
'CATALOGID': '1837_xxpl',
'TABKEY' : 'tab1',
"txtDate": date,
}
response = self.do_request(url, data, method='GET', type='binary')
if response is not None and len(response) > 0:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
def _get_rzrq_detail(self, date):
#url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
url = 'http://www.szse.cn/api/report/ShowReport'
data = {
'SHOWTYPE': 'xls',
'CATALOGID': '1837_xxpl',
'TABKEY': 'tab2',
"txtDate" : date,
}
response = self.do_request(url, data, method='GET', type='binary')
if response is not None and len(response) > 0:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
def get_pledge_info(self, date):
df_total = self._get_pledge_info_total(date)
df_detail = self._get_pledge_info_detail(date)
if df_total is not None:
df_total['date'] = date
if df_detail is not None:
df_detail['date'] = date
df_detail['证券代码'] = df_detail['证券代码'].apply(lambda x: str(x).zfill(6))
return df_total, df_detail
def _get_pledge_info_total(self, date):
#url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
url = 'http://www.szse.cn/api/report/ShowReport'
data = {
'SHOWTYPE': 'xls',
'CATALOGID': '1837_gpzyhgxx',
'TABKEY': 'tab1',
"txtDate" : date,
'ENCODE' : 1,
}
response = self.do_request(url, data, method='GET', type='binary')
if response is not None and len(response) > 0:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
def _get_pledge_info_detail(self, date):
#url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
url = 'http://www.szse.cn/api/report/ShowReport'
data = {
'SHOWTYPE': 'xls',
'CATALOGID': '1837_gpzyhgxx',
'TABKEY': 'tab2',
"txtDate" : date,
'ENCODE' : 1,
}
response = self.do_request(url, data, method='GET', type='binary')
if response is not None and len(response) > 0:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
class CSIAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
def get_index_list(self):
url = 'http://www.csindex.com.cn/zh-CN/indices/index'
page = 1
result_data = []
while True:
data = {
"data_type" : "json",
"page" : page,
}
response = self.do_request(url, data, method='GET')
rsp = json.loads(response)
            print("fetching data at page %d" % (page))
            page = page + 1
if "list" in rsp:
result_data.extend(rsp['list'])
if len(rsp['list']) == 0:
break
else:
return None
return pd.DataFrame(result_data)
def get_index_component(self, index):
url = 'http://www.csindex.com.cn/uploads/file/autofile/cons/%scons.xls' % (index)
response = self.do_request(url, None, method='GET', type='binary')
if response is not None:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
class XueqiuAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
# 600000.SH -> SH600000
def convert_to_xq_symbol(self, symbol):
temp = symbol.split(".")
return temp[1] + temp[0]
def convert_to_xq_symbols(self, symbols):
result = ''
for symbol in symbols.split(','):
result = result + self.convert_to_xq_symbol(symbol) + ','
return result
# SH600000 -> 600000.SH
def convert_from_xq_symbol(self, symbol):
market = symbol[0:2]
code = symbol[2:]
return code + '.' + market
def prepare_cookies(self, url):
response = self.do_request(url, None)
if response is not None:
cookies = self.get_cookies()
return cookies
else:
return None
def get_quote(self, symbols):
url = 'https://stock.xueqiu.com/v5/stock/realtime/quotec.json'
data = {
'symbol' : self.convert_to_xq_symbols(symbols)
}
# {"data":[{"symbol":"SH000001","current":3073.8321,"percent":-1.15,"chg":-35.67,"timestamp":1528427643770,"volume":6670380300,"amount":8.03515860132E10,"market_capital":1.393367880255658E13,"float_market_capital":1.254120000811718E13,"turnover_rate":0.64,"amplitude":0.91,"high":3100.6848,"low":3072.5418,"avg_price":3073.832,"trade_volume":5190400,"side":0,"is_trade":true,"level":1,"trade_session":null,"trade_type":null}],"error_code":0,"error_description":null}
response = self.do_request(url, data, method='GET')
if response is not None:
jsonobj = json.loads(response)
if jsonobj['error_code'] == 0:
result = []
for rsp in jsonobj['data']:
result.append( {
'time' : datetime.datetime.fromtimestamp(rsp['timestamp']/1000),
'symbol' : self.convert_from_xq_symbol(rsp['symbol']),
'high' : rsp['high'],
'low' : rsp['low'],
'last' : rsp['current'],
'change' : rsp['chg'],
'percent': rsp['percent'],
'volume' : rsp['volume'],
'amount' : rsp['amount'],
'turnover_rate' : rsp['turnover_rate'],
'market_capital' : rsp['market_capital'],
'float_market_capital' : rsp['float_market_capital'],
'is_trading' : rsp['is_trade'],
} )
return pd.DataFrame(result), ''
else:
return None, jsonobj['error_description']
else:
return None, '请求数据失败'
def get_kline(self, symbol, timestamp, period, count):
url = 'https://stock.xueqiu.com/v5/stock/chart/kline.json'
data = {
'symbol' : self.convert_to_xq_symbol(symbol),
'begin' : timestamp,
'period' : period,
'type' : 'before',
'count' : count,
'indicator' : 'kline',
}
cookies = self.prepare_cookies('https://xueqiu.com/hq')
response = self.do_request(url, data, cookies=cookies, method='GET')
if response is not None:
jsonobj = json.loads(response)
if jsonobj['error_code'] == 0:
result = []
if len(jsonobj['data']) <= 0:
return None, jsonobj['error_description']
for rsp in jsonobj['data']['item']:
result.append( {
'symbol' : symbol,
'time' : datetime.datetime.fromtimestamp(rsp[0]/1000),
'volume' : rsp[1],
'open' : rsp[2],
'high' : rsp[3],
'low' : rsp[4],
'last' : rsp[5],
'change' : rsp[6],
'percent': rsp[7],
'turnover_rate' : rsp[8],
} )
return pd.DataFrame(result), ''
else:
return None, jsonobj['error_description']
else:
return None, '请求数据失败'
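    # Usage sketch (illustrative; the 'day' period string is an assumption about
    # the Xueqiu API, not verified here, and `time` must be imported first):
    #   agent = XueqiuAgent()
    #   df, msg = agent.get_kline('600000.SH', int(time.time() * 1000), 'day', 100)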
def get_kline_multisymbol(self, symbols, timestamp, period, count):
cookies = self.prepare_cookies('https://xueqiu.com/hq')
url = 'https://stock.xueqiu.com/v5/stock/chart/kline.json'
result = []
for symbol in symbols:
data = {
'symbol' : self.convert_to_xq_symbol(symbol),
'begin' : timestamp,
'period' : period,
'type' : 'before',
'count' : count,
'indicator' : 'kline',
}
response = self.do_request(url, data, cookies=cookies, method='GET')
if response is not None:
jsonobj = json.loads(response)
if jsonobj['error_code'] == 0:
for rsp in jsonobj['data']['item']:
result.append( {
'symbol' : symbol,
'time' : datetime.datetime.fromtimestamp(rsp[0]/1000),
'volume' : rsp[1],
'open' : rsp[2],
'high' : rsp[3],
'low' : rsp[4],
'last' : rsp[5],
'change' : rsp[6],
'percent': rsp[7],
'turnover_rate': rsp[8],
} )
return pd.DataFrame(result), ''
def get_kline_multitimestamp(self, symbol, timestamps, period, count):
cookies = self.prepare_cookies('https://xueqiu.com/hq')
url = 'https://stock.xueqiu.com/v5/stock/chart/kline.json'
result = []
for timestamp in timestamps:
data = {
'symbol' : self.convert_to_xq_symbol(symbol),
'begin' : timestamp,
'period' : period,
'type' : 'before',
'count' : count,
'indicator' : 'kline',
}
response = self.do_request(url, data, cookies=cookies, method='GET')
if response is not None:
jsonobj = json.loads(response)
if jsonobj['error_code'] == 0:
for rsp in jsonobj['data']['item']:
result.append( {
'symbol' : symbol,
'time' : datetime.datetime.fromtimestamp(rsp[0]/1000),
'volume' : rsp[1],
'open' : rsp[2],
'high' : rsp[3],
'low' : rsp[4],
'last' : rsp[5],
'change' : rsp[6],
'percent': rsp[7],
'turnover_rate': rsp[8],
} )
return pd.DataFrame(result), ''
class SinaAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
@staticmethod
def clear_text(text):
return text.replace('\n', '').strip()
def get_adj_factor(self, symbol):
now = datetime.datetime.now()
year = now.year
month = now.month
if month < 4 :
quarter = 1
elif month < 7:
quarter = 2
elif month < 10:
quarter = 3
else:
quarter = 4
temp = symbol.split(".")
url = 'http://vip.stock.finance.sina.com.cn/corp/go.php/vMS_FuQuanMarketHistory/stockid/%s.phtml' % temp[0]
curr_year = year
curr_quarter = quarter
result_list = []
no_data_cnt = 0
while True:
print('getting data for year = %d, quarter = %d' % (curr_year, curr_quarter))
param = {
'year' : curr_year,
'jidu' : curr_quarter,
}
response = self.do_request(url, param, method='GET', encoding='gb18030')
soup = BeautifulSoup(response, "html5lib")
divs = soup.find_all('div')
data = []
for div in divs:
if div.has_attr('class') and 'tagmain' in div['class']:
tables = div.find_all('table')
for table in tables:
if table.has_attr('id') and table['id'] == 'FundHoldSharesTable':
rows = table.findAll('tr')
for row in rows:
cols = row.findAll('td')
if len(cols) == 8:
date = SinaAgent.clear_text(cols[0].text)
adjust_factor = SinaAgent.clear_text(cols[7].text)
if date == '日期':
continue
data.append({
"date": date,
"adjust_factor": adjust_factor,
})
result_list.extend(data)
if len(data) == 0:
no_data_cnt = no_data_cnt + 1
if no_data_cnt >= 3:
break
# prepare for next round
if curr_quarter == 1:
curr_year = curr_year - 1
curr_quarter = 4
else:
curr_quarter = curr_quarter - 1
return pd.DataFrame(result_list), ""
# 600000.SH -> SH600000
def convert_to_sina_symbol(self, symbol):
temp = symbol.split(".")
return temp[1].lower() + temp[0]
def get_trade_detail(self, symbol, trade_date):
url = 'http://market.finance.sina.com.cn/downxls.php?date=%s&symbol=%s' % (trade_date, self.convert_to_sina_symbol(symbol))
response = self.do_request(url, None, method='GET', type='text', encoding='gb18030')
if response is not None:
rsp = io.StringIO(response)
line = rsp.readline() # skip first line
line = rsp.readline()
result = []
while line is not None and len(line) > 10:
items = line.split('\t')
if len(items) == 6:
result.append({
'time' : SinaAgent.clear_text(items[0]),
'price' : SinaAgent.clear_text(items[1]),
'change' : SinaAgent.clear_text(items[2]),
'volume' : SinaAgent.clear_text(items[3]),
'turnover': SinaAgent.clear_text(items[4]),
'bs' : SinaAgent.clear_text(items[5]),
})
line = rsp.readline()
df = pd.DataFrame(result)
df['date'] = trade_date
df['symbol'] = symbol
return df, ''
return None, '获取数据失败'
class CNInfoAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
@staticmethod
def clear_text(text):
return text.replace('\n', '').strip()
def _parse_report_file(self, file):
lines = file.readlines()
data_list = []
for i in range(len(lines)):
items = lines[i].decode('gbk').split()
if items[0][:2] == '机构':
head = items[0].split(sep=',')
else:
items = lines[i].decode('gbk')[1:]
data = items.split(sep=',')
data[0] = data[0][1:-1]
data[-1] = remove_non_numerical(data[-1])
data_list.append(data)
df = pd.DataFrame(data_list)
df.columns = head
return df
def get_report_data(self, market, symbol, type):
url = 'http://www.cninfo.com.cn/cninfo-new/data/download'
data = {
'market' : market,
'type' : type,
'code' : symbol,
'orgid' : 'gs%s%s' % (market, symbol),
'minYear' : '1990',
'maxYear' : '2018',
}
response = self.do_request(url, param=data, method='POST', type='binary')
'''if response is None:
return None, '没有获取到数据'
else:
'''
try:
zip_ref = zipfile.ZipFile(io.BytesIO(response))
df_list = []
for finfo in zip_ref.infolist():
file = zip_ref.open(finfo, 'r')
df = self._parse_report_file(file)
df_list.append(df)
df_result = _concat_df(df_list)
df_result.reset_index(inplace=True, drop=True)
return df_result, ''
except:
return None, '获取数据失败'
def get_shareholder_structure(self, market, symbol):
if symbol.startswith('002'):
board = 'sme'
elif symbol.startswith('3'):
board = 'cn'
else:
board = 'mb'
url = 'http://www.cninfo.com.cn/information/lastest/%s%s%s.html' % (market, board, symbol)
response = self.do_request(url, encoding='gb18030')
if response is None:
return None, '获取数据失败'
soup = BeautifulSoup(response, "html5lib")
divs = soup.find_all('div')
data = []
for div in divs:
if div.has_attr('class') and 'clear' in div['class']:
tables = div.find_all('table')
for table in tables:
rows = table.findAll('tr')
for row in rows:
cols = row.findAll('td')
if len(cols) == 2:
indicator = CNInfoAgent.clear_text(cols[0].text).replace(':', '')
value = CNInfoAgent.clear_text(cols[1].text)
data.append({
"indicator": indicator,
"value" : value,
})
break
return pd.DataFrame(data), ""
def get_dividend(self, symbol):
symbol = symbol[:6]
url = "http://www.cninfo.com.cn/information/dividend/szmb%s.html"
response = self.do_request(url % symbol, method='GET', encoding='gbk')
if response is None:
return pd.DataFrame([])
soup = BeautifulSoup(response, 'html5lib')
# get name_cn
tds = soup.find_all('td')
for td in tds:
if td.has_attr('style') and 'padding-right:10px' in td['style']:
name_cn = td.text.split(':')[-1]
#get dividend_data
divs = soup.find_all('div')
for div in divs:
if div.has_attr('class') and 'clear' in div['class']:
trs = div.find_all('tr')
if trs == []:
continue
data_list = []
for tr in trs[1:]:
data = [symbol, name_cn]
tds = tr.find_all('td')
for td in tds:
text = td.text.replace(' ', '').replace('\n', '').replace('\xa0', '')
data.append(text)
data_list.append(data)
df_res = pd.DataFrame(data_list, columns=['股票代码', '公司名称', '分红年度', '分红方案', '股权登记日',
'除权日', '红股上市日'])
df_res['股权登记日'] = df_res['股权登记日'].map(time_map)
df_res['除权日'] = df_res['除权日'].map(time_map)
df_res['分红方案'] = df_res['分红方案'].map(plan_map)
df_res['税后股利'] = df_res['分红方案'].map(lambda x: 0.8 * float(x))
df_res['公司代码'] = df_res['股票代码']
df = df_res[['公司代码', '股权登记日', '分红方案', '税后股利', '除权日', '公司名称', '股票代码']]
df.columns = ['COMPANY_CODE', 'DIVIDEND_DATE', 'DIVIDEND_PER_SHARE1_A',
'DIVIDEND_PER_SHARE2_A', 'EX_DIVIDEND_DATE_A','SECURITY_ABBR_A', 'SECURITY_CODE_A']
return df
class EastMoneyAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
def _parse_hist_money_flow(self, response):
jsonobj = json.loads(response)
result = []
for data in jsonobj['data']:
items = data.split(',')
result.append({
'Time': items[0],
'ZLJLRJE': items[1],
'ZLJLRZB': items[2],
'CDDJLRJE': items[3],
'CDDJLRZB': items[4],
'DDLRJE': items[5],
'DDLRZB': items[6],
'ZDLRJE': items[7],
'ZDLRZB': items[8],
'XDLRJE': items[9],
'XDLRZB': items[10],
})
return pd.DataFrame(result)
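    # The keys above appear to abbreviate EastMoney's Chinese indicator names:
    # ZL = main force, CDD = extra-large orders, DD = large orders, ZD = medium
    # orders, XD = small orders; JLRJE = net inflow amount, JLRZB = net inflow
    # ratio (see the indicator definitions further below).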
def _get_hist_money_flow(self, url):
response = self.do_request(url)
if response is None:
return None, '获取数据失败'
df = self._parse_hist_money_flow(response)
return df, ''
def get_hist_money_flow(self, symbol):
url = 'http://ff.eastmoney.com//EM_CapitalFlowInterface/api/js?type=hff&rtntype=2&js={"data":(x)}&check=TMLBMSPROCR&acces_token=1942f5da9b46b069953c873404aad4b5&id=%s' % symbol
return self._get_hist_money_flow(url)
def get_hist_money_flow_market(self):
url = 'http://data.eastmoney.com/zjlx/dpzjlx.html'
response = self.do_request(url)
if response is None:
return None, '获取数据失败'
# get data from html
idx = response.find('var DefaultJson=')
idx1 = response.find('[', idx)
idx2 = response.find(']', idx)
json_rsp = '{ "data": ' + response[idx1:idx2+1] + '}'
df = self._parse_hist_money_flow(json_rsp)
return df, ''
def _get_realtime_money_flow(self, url):
response = self.do_request(url)
if response is None:
return None, '获取数据失败'
jsonobj = json.loads(response)
result = {}
result['Time'] = jsonobj['xa'].split(',')
result['ZLJLRJE'] = list()
result['CDDJLRJE'] = list()
result['DDJLRJE'] = list()
result['ZDJLRJE'] = list()
result['XDJLRJE'] = list()
for data in jsonobj['ya']:
items = data.split(',')
result['ZLJLRJE'].append(items[0])
result['CDDJLRJE'].append(items[1])
result['DDJLRJE'].append(items[2])
result['ZDJLRJE'].append(items[3])
result['XDJLRJE'].append(items[4])
df = pd.DataFrame().from_dict(result, orient='index').T
df.dropna(inplace=True)
return df, ''
def get_realtime_money_flow(self, symbol):
url = 'http://ff.eastmoney.com/EM_CapitalFlowInterface/api/js?id=%s&type=ff&check=MLBMS&js={(x)}&rtntype=3&acces_token=1942f5da9b46b069953c873404aad4b5' % symbol
return self._get_realtime_money_flow(url)
def get_realtime_money_flow_market(self):
url = 'http://ff.eastmoney.com/EM_CapitalFlowInterface/api/js?id=ls&type=ff&check=MLBMS&js={(x)}&rtntype=3&acces_token=1942f5da9b46b069953c873404aad4b5'
return self._get_realtime_money_flow(url)
#==============================================================================
    # Fetch the real-time money flow for all stocks in one request, sorted by main-force net inflow amount.
    ## Indicator definitions
    # Extra-large order: a trade of at least 500,000 shares or 1,000,000 yuan;
    # Large order: at least 100,000 shares or 200,000 yuan, and below 500,000 shares and 1,000,000 yuan;
    # Medium order: at least 20,000 shares or 40,000 yuan, and below 100,000 shares and 200,000 yuan;
    # Small order: below 20,000 shares and 40,000 yuan;
    # Inflow: buy turnover;
    # Outflow: sell turnover;
    # Main-force inflow: buy turnover of extra-large plus large orders;
    # Main-force outflow: sell turnover of extra-large plus large orders;
    # Net amount: inflow - outflow;
    # Net ratio: (inflow - outflow) / total turnover;
    # Unit: 100 million yuan
#==============================================================================
def toDataFrame(self,ll):
dataframe = []
for l in ll:
l = l.replace('-','0')
temp = l.split(",")[1:]
temp[2:-2] = map(eval, temp[2:-2])
dataframe.append(temp)
dataframe = pd.DataFrame(dataframe)
dataframe.columns = [u'代码',u'名称',u'最新价',u'今日涨跌幅',u'今日主力净流入净额',u'今日主力净流入净占比',u'今日超大单净流入净额',u'今日超大单净流入净占比',u'今日大单净流入净额',u'今日大单净流入净占比',u'今日中单净流入净额',u'今日中单净流入净占比',u'今日小单净流入净额',u'今日小单净流入净占比',u'time',u'未知']
return dataframe
def _get_realtime_allstock_flow(self, url):
response = self.do_request(url)
if response is None:
return None, '获取数据失败'
pages = 'pages'
date = 'date'
data = 'data'
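        # The response looks like 'var xxxx={pages:...,date:"...",data:[...]}';
        # response[13:] appears to strip the 'var xxxx=' prefix, and the names
        # pages/date/data bound above let eval() accept the unquoted JS keys.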
data = eval(response[13:])
flashflow = data['data']
df = self.toDataFrame(flashflow)
        df.index = df.iloc[:, 0]
df.dropna(inplace=True)
return df, ''
def get_allstock_flow(self):
url = 'http://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=ct&st=(BalFlowMain)&sr=-1&p=1&ps=3700&js=var%20ucjEIgIa={pages:(pc),date:%222014-10-22%22,data:[(x)]}&token=1942f5da9b46b069953c873404aad4b5&cmd=C._AB&sty=DCFFITA&rt=50984894'
return self._get_realtime_allstock_flow(url)
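# A minimal usage sketch (illustration only; live network access to the
# exchange/EastMoney endpoints is required and the date below is made up):
#
#   sh = SHExAgent()
#   df_indexes = sh.get_index_list()
#   df_total, df_detail = sh.get_rzrq_info('2018-06-01')
#
#   em = EastMoneyAgent()
#   df_flow, msg = em.get_hist_money_flow_market()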
|
py | 1a52477a0ccf6e62eeaabbf7470425ab5c7088da | from tqdm.auto import tqdm
import numpy as np
import glob
import os
from torchvision import models
from torchvision import transforms
import torch
import torch.nn as nn
from PIL import Image
import gc
import argparse
import h5py
import json
from augs import (
GaussianBlur,
Cutout,
CutoutColor,
CenterCrop,
Rotate,
Flip,
Grayscale,
Original,
)
parser = argparse.ArgumentParser(description="dump videos as features")
parser.add_argument(
"--videos_path",
default="",
type=str,
required=True,
help="path to npy stored videos",
)
parser.add_argument(
"--save_path", default="", type=str, required=True, help="path to features",
)
args = parser.parse_args()
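# Example invocation (hypothetical script name and paths):
#   python dump_features.py --videos_path ./videos_npy --save_path ./features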
device = "cuda" if torch.cuda.is_available() else "cpu"
batch_size = 32
preprocess = transforms.Compose(
[
transforms.Resize(256),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
resnet = models.resnet50(pretrained=True)
modules = list(resnet.children())[:-1]
resnet = nn.Sequential(*modules)
resnet.to(device)
resnet.eval()
files = glob.glob(os.path.join(args.videos_path, "*.npy"))
errors = []
Augmentations = [
Original(),
GaussianBlur(),
Cutout(),
CutoutColor(),
CenterCrop(),
Rotate(),
Flip(),
Grayscale(),
]
dataset = h5py.File("datasets/eccv16_dataset_tvsum_google_pool5.h5", "r")
all_picks = dict(
zip(
list(dataset.keys()),
[dataset[key]["picks"][...] for key in list(dataset.keys())],
)
)
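# `all_picks` maps each video key in the TVSum HDF5 file to its `picks` array,
# i.e. the subsampled frame indices used by the dataset, so the features
# extracted below stay aligned with the ground-truth annotations.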
f = open("id_to_key_map_tvsum.json")
id_key_map = json.load(f)
f.close()
for i, file in enumerate(files):
prefix = file.split("/")[-1].split(".")[0]
save_path = os.path.join(args.save_path, prefix)
picks = all_picks[id_key_map[prefix]]
if not os.path.exists(save_path):
os.mkdir(save_path)
try:
video = np.load(file)
video = video[picks, :, :, :]
except:
errors.append(file)
continue
for aug in tqdm(Augmentations, desc=f"Augmenting video {i+1}/{len(files)}"):
aug_name = aug.__class__.__name__.lower()
curr_save_path = os.path.join(save_path, f"{prefix}_{aug_name}.pt")
if os.path.exists(curr_save_path):
continue
video_aug = aug(video)
features = []
inputs = []
for image in tqdm(video_aug, desc=aug_name):
image = Image.fromarray(image.astype(np.uint8))
image = preprocess(image)
image = image.unsqueeze(0).to(device)
inputs.append(image)
if len(inputs) % batch_size == 0:
inputs = torch.cat(inputs, 0)
with torch.no_grad():
feat = resnet(inputs)
features.append(feat.squeeze().cpu())
inputs = []
if len(inputs) > 0:
inputs = torch.cat(inputs, 0)
with torch.no_grad():
feat = resnet(inputs)
features.append(feat.squeeze(-1).squeeze(-1).cpu())
features = torch.cat(features, 0)
features = features.view(-1, 2048)
torch.save(features.cpu(), curr_save_path)
del features
gc.collect()
print("Errors")
print(errors)
|
py | 1a5247a00eb67a9973ee24bd81945ffc6f6f81ae | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from collections import namedtuple
from corehq.apps.userreports.models import AsyncIndicator, get_datasource_config
from corehq.apps.userreports.util import get_indicator_adapter
DOMAIN = 'icds-cas'
DATA_SOURCES = (
'static-icds-cas-static-child_cases_monthly_tableau_v2',
'static-icds-cas-static-ccs_record_cases_monthly_tableau_v2',
)
FakeChange = namedtuple('FakeChange', ['id', 'document'])
CASE_DOC_TYPE = 'CommCareCase'
STATE_IDS = [
'f98e91aa003accb7b849a0f18ebd7039',
'f9b47ea2ee2d8a02acddeeb491d3e175',
'a2fcb186e9be8464e167bb1c56ce8fd9',
'f1cd643f0df908421abd915298ba57bc',
'd982a6fb4cca0824fbde59db18d3800f',
'9cd4fd88d9f047088a377b7e7d144830',
'ea4d587fa93a2ed8300853d51db661ef',
]
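# The state ids above are the ICDS states whose cases get re-queued. For each
# configured data source, the command finds case ids whose monthly rows have
# inconsistent validity/pregnancy flags and queues them for a rebuild via
# AsyncIndicator (using fake Kafka changes).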
class Command(BaseCommand):
help = ""
def handle(self, *args, **options):
fake_change_doc = {'doc_type': CASE_DOC_TYPE, 'domain': DOMAIN}
for data_source_id in DATA_SOURCES:
print("processing data source %s" % data_source_id)
data_source, is_static = get_datasource_config(data_source_id, DOMAIN)
assert is_static
adapter = get_indicator_adapter(data_source)
table = adapter.get_table()
for case_id in self._get_case_ids_to_process(adapter, table, data_source_id):
change = FakeChange(case_id, fake_change_doc)
AsyncIndicator.update_from_kafka_change(change, [data_source_id])
def _add_filters(self, query, table, data_source_id):
if data_source_id == 'static-icds-cas-static-child_cases_monthly_tableau_v2':
return query.filter(
table.columns.valid_all_registered_in_month == 1,
table.columns.valid_in_month == 0,
)
elif data_source_id == 'static-icds-cas-static-ccs_record_cases_monthly_tableau_v2':
return query.filter(
table.columns.pregnant_all == 1,
table.columns.pregnant == 0,
)
def _get_case_ids_to_process(self, adapter, table, data_source_id):
for state_id in STATE_IDS:
print("processing state %s" % state_id)
query = adapter.session_helper.Session.query(table.columns.doc_id).distinct(table.columns.doc_id)
case_ids = query.filter(
table.columns.state_id == state_id,
)
            case_ids = self._add_filters(case_ids, table, data_source_id).all()
num_case_ids = len(case_ids)
print("processing %d cases" % (num_case_ids))
for i, case_id in enumerate(case_ids):
yield case_id
if i % 1000 == 0:
print("processed %d / %d docs" % (i, num_case_ids))
|