filename | text
---|---|
the-stack_0_13099 | # Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Huan Yu <[email protected]>
# Feng Chen <[email protected]>
# Yi Wang <[email protected]>
# Chong Peng <[email protected]>
# Wenting Li <[email protected]>
# Date: October 20, 2011
# pylint: disable=too-many-lines
"""
This is the SCons rules helper module, which should be
imported by the SCons script.
"""
from __future__ import absolute_import
import os
import py_compile
import re
import shutil
import signal
import socket
import string
import subprocess
import sys
import tarfile
import tempfile
import time
import zipfile
# pylint: disable=E0401
import SCons
import SCons.Action
import SCons.Builder
import SCons.Scanner
import SCons.Scanner.Prog
from blade import blade_util
from blade import console
from blade import toolchain
from blade.blade_util import iteritems
from blade.console import color
# verbosity level that controls how much output is printed
_verbosity = 'normal'
# blade path
blade_path = os.path.dirname(__file__)
# build error log during scons execution
blade_error_log = None
# linking tmp dir
linking_tmp_dir = ''
# build time stamp
build_time = time.time()
proto_import_re = re.compile(r'^import\s+"(\S+)"\s*;\s*$', re.M)
proto_import_public_re = re.compile(r'^import\s+public\s+"(\S+)"\s*;\s*$', re.M)
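# For illustration, these patterns are intended to match proto statements such as
# (the file name below is just an example):
#   import "common/base.proto";
#   import public "common/base.proto";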
def set_blade_error_log(path):
global blade_error_log
if blade_error_log:
console.warning('blade error log was already set to %s' %
blade_error_log.name)
else:
blade_error_log = open(path, 'w')
def _compile_python(src, build_dir):
if src.startswith(build_dir):
pyc = src + 'c'
else:
pyc = os.path.join(build_dir, src) + 'c'
dir = os.path.dirname(pyc)
if not os.path.exists(dir):
os.makedirs(dir)
py_compile.compile(src, pyc)
return pyc
def generate_python_library(target, source, env):
data = dict()
data['base_dir'] = env.get('BASE_DIR', '')
srcs = []
for s in source:
src = str(s)
digest = blade_util.md5sum_file(src)
srcs.append((src, digest))
data['srcs'] = srcs
with open(str(target[0]), 'w') as f:
f.write(str(data))
return None
def generate_python_binary(target, source, env):
"""The action to generate python executable file. """
target_name = str(target[0])
base_dir, build_dir = env.get('BASE_DIR', ''), env['BUILD_DIR']
entry = env['ENTRY']
srcs = [str(s) for s in source]
toolchain.generate_python_binary(base_dir, entry, target_name, srcs)
return None
def generate_resource_index(target, source, env):
res_source_path = str(target[0])
res_header_path = str(target[1])
if not os.path.exists(os.path.dirname(res_header_path)):
os.mkdir(os.path.dirname(res_header_path))
with open(res_header_path, 'w') as h, open(res_source_path, 'w') as c:
source_path = env["SOURCE_PATH"]
full_name = blade_util.regular_variable_name("%s/%s" % (source_path, env["TARGET_NAME"]))
guard_name = 'BLADE_RESOURCE_%s_H' % full_name.upper()
h.write('#ifndef %s\n#define %s\n' % (guard_name, guard_name))
h.write('''
// This file was automatically generated by blade
#ifdef __cplusplus
extern "C" {
#endif
#ifndef BLADE_RESOURCE_TYPE_DEFINED
#define BLADE_RESOURCE_TYPE_DEFINED
struct BladeResourceEntry {
const char* name;
const char* data;
unsigned int size;
};
#endif
''')
res_index_name = 'RESOURCE_INDEX_%s' % full_name
c.write('// This file was automatically generated by blade\n')
c.write('#include "%s"\n' % res_header_path)
c.write('const struct BladeResourceEntry %s[] = {\n' % res_index_name)
for s in source:
src = str(s)
var_name = blade_util.regular_variable_name(src)
org_src = os.path.relpath(src, source_path)
h.write('// %s\n' % org_src)
h.write('extern const char RESOURCE_%s[%d];\n' % (var_name, s.get_size()))
h.write('extern const unsigned RESOURCE_%s_len;\n' % var_name)
c.write(' { "%s", RESOURCE_%s, %s },\n' % (org_src, var_name, s.get_size()))
c.write('};\n')
c.write('const unsigned %s_len = %s;\n' % (res_index_name, len(source)))
h.write('// Resource index\n')
h.write('extern const struct BladeResourceEntry %s[];\n' % res_index_name)
h.write('extern const unsigned %s_len;\n' % res_index_name)
h.write('\n#ifdef __cplusplus\n} // extern "C"\n#endif\n')
h.write('\n#endif // %s\n' % guard_name)
return None
def generate_resource_file(target, source, env):
"""Generate resource source file in resource_library"""
src_path = str(source[0])
new_src_path = str(target[0])
cmd = ('xxd -i %s | sed -e "s/^unsigned char /const char RESOURCE_/g" '
'-e "s/^unsigned int /const unsigned int RESOURCE_/g"> %s') % (
src_path, new_src_path)
p = subprocess.Popen(
cmd,
env={},
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
stdout, stderr = p.communicate()
if p.returncode or stderr:
error = 'failed to generate resource file'
if stderr:
error = error + ': ' + stderr
console.error_exit(error)
return p.returncode
def process_java_sources(target, source, env):
"""Copy source file into .sources dir. """
shutil.copy2(str(source[0]), str(target[0]))
return None
def process_java_resources(target, source, env):
"""Copy resource file into .resources dir. """
shutil.copy2(str(source[0]), str(target[0]))
return None
def _check_java_jar_classes(sources, classes_dir):
"""Check if all the classes are generated into classes_dir completely. """
# pylint: disable=too-many-nested-blocks
sources = sorted([os.path.basename(s) for s in sources])
sources = [s for s in sources if s[0].isupper()]
classes = ['%s.class' % s[:-5] for s in sources]
if not classes:
return
generated_classes = []
paths = set()
retry = 0
while retry < 3:
for dir, subdirs, files in os.walk(classes_dir):
for f in files:
if f.endswith('.class'):
f = os.path.relpath(os.path.join(dir, f), classes_dir)
if f not in paths:
paths.add(f)
name = os.path.basename(f)
if '$' not in name:
generated_classes.append(name)
generated_classes.sort()
i, j = 0, 0
while j != len(generated_classes):
if classes[i] == generated_classes[j]:
i += 1
if i == len(classes):
return
j += 1
time.sleep(0.5)
retry += 1
console.debug('Classes: %s Generated classes: %s' % (classes, generated_classes))
console.error_exit('Missing class files in %s' % classes_dir)
def _generate_java_jar(target, sources, resources, env):
"""
Compile the java sources and generate a jar containing the classes and resources.
"""
classes_dir = target.replace('.jar', '.classes')
resources_dir = target.replace('.jar', '.resources')
if os.path.exists(classes_dir):
shutil.rmtree(classes_dir)
os.makedirs(classes_dir)
java, javac, jar, options = env['JAVA'], env['JAVAC'], env['JAR'], env['JAVACFLAGS']
classpath = ':'.join(env['JAVACLASSPATH'])
if not classpath:
classpath = blade_util.get_cwd()
if sources:
cmd = '%s %s -d %s -classpath %s %s' % (
javac, options, classes_dir, classpath, ' '.join(sources))
if echospawn(args=[cmd], env=os.environ, sh=None, cmd=None, escape=None):
return 1
cmd = ['%s cf %s' % (jar, target)]
if sources:
_check_java_jar_classes(sources, classes_dir)
cmd.append('-C %s .' % classes_dir)
if os.path.exists(resources_dir):
for resource in resources:
cmd.append("-C '%s' '%s'" % (resources_dir,
os.path.relpath(resource, resources_dir)))
cmd_str = ' '.join(cmd)
return echospawn(args=[cmd_str], env=os.environ, sh=None, cmd=None, escape=None)
def generate_java_jar(target, source, env):
target = str(target[0])
sources = []
index = 0
for src in source:
if str(src).endswith('.java'):
sources.append(str(src))
index += 1
else:
break
resources = [str(src) for src in source[index:]]
return _generate_java_jar(target, sources, resources, env)
_one_jar_boot_path = None
def _generate_one_jar(target,
main_class,
main_jar,
deps_jar,
one_jar_boot_path):
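    # The resulting one-jar layout is roughly: the one-jar boot classes, the
    # main jar under main/, dependency jars under lib/, resource entries copied
    # from the main and dependency jars, and a generated META-INF/MANIFEST.MF.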
target_dir = os.path.dirname(target)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
target_one_jar = zipfile.ZipFile(target, 'w')
jar_path_set = set()
# Copy files from one-jar-boot.jar to the target jar
zip_file = zipfile.ZipFile(one_jar_boot_path, 'r')
name_list = zip_file.namelist()
for name in name_list:
if not name.lower().endswith('manifest.mf'): # Exclude manifest
target_one_jar.writestr(name, zip_file.read(name))
jar_path_set.add(name)
zip_file.close()
# Main jar and dependencies
target_one_jar.write(main_jar, os.path.join('main',
os.path.basename(main_jar)))
for dep in deps_jar:
dep_name = os.path.basename(dep)
target_one_jar.write(dep, os.path.join('lib', dep_name))
# Copy resources to the root of target onejar
for jar in [main_jar] + deps_jar:
jar = zipfile.ZipFile(jar, 'r')
jar_name_list = jar.namelist()
for name in jar_name_list:
if name.endswith('.class') or name.upper().startswith('META-INF'):
continue
if name not in jar_path_set:
jar_path_set.add(name)
target_one_jar.writestr(name, jar.read(name))
jar.close()
# Manifest
# Note that the manifest file must end with a new line or carriage return
target_one_jar.writestr(os.path.join('META-INF', 'MANIFEST.MF'),
'''Manifest-Version: 1.0
Main-Class: com.simontuffs.onejar.Boot
One-Jar-Main-Class: %s
''' % main_class)
target_one_jar.close()
return None
def generate_one_jar(target, source, env):
if len(source) < 2:
console.error_exit('Failed to generate java binary from %s: '
'Source should at least contain main class '
'and main jar' % ','.join(str(s) for s in source))
main_class = str(source[0])
main_jar = str(source[1])
deps_jar = []
for dep in source[2:]:
deps_jar.append(str(dep))
target = str(target[0])
# print target, main_class, main_jar, deps_jar, _one_jar_boot_path
return _generate_one_jar(target, main_class, main_jar, deps_jar,
_one_jar_boot_path)
def _is_signature_file(name):
parts = name.upper().split('/')
if len(parts) == 2:
for suffix in ('.SF', '.DSA', '.RSA'):
if parts[1].endswith(suffix):
return True
if parts[1].startswith('SIG-'):
return True
return False
_JAR_MANIFEST = 'META-INF/MANIFEST.MF'
_FATJAR_EXCLUSIONS = frozenset(['LICENSE', 'README', 'NOTICE',
'META-INF/LICENSE', 'META-INF/README',
'META-INF/NOTICE', 'META-INF/INDEX.LIST'])
def _is_fat_jar_excluded(name):
name = name.upper()
for exclusion in _FATJAR_EXCLUSIONS:
if name.startswith(exclusion):
return True
return name == _JAR_MANIFEST or _is_signature_file(name)
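# For example, under the rules above 'META-INF/LICENSE.txt' and 'META-INF/FOO.SF'
# (a signature file) are excluded from the fat jar, while 'com/example/Foo.class'
# is kept.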
def _generate_fat_jar(target, deps_jar, env):
"""Generate a fat jar containing the contents of all the jar dependencies. """
target_dir = os.path.dirname(target)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
target_fat_jar = zipfile.ZipFile(target, 'w', zipfile.ZIP_DEFLATED)
# Record paths written in the fat jar to avoid duplicate writing
zip_path_dict = {}
zip_path_conflicts = 0
for dep_jar in deps_jar:
jar = zipfile.ZipFile(dep_jar, 'r')
name_list = jar.namelist()
for name in name_list:
if name.endswith('/') or not _is_fat_jar_excluded(name):
if name not in zip_path_dict:
target_fat_jar.writestr(name, jar.read(name))
zip_path_dict[name] = os.path.basename(dep_jar)
else:
if not name.endswith('/'): # Not a directory
zip_path_conflicts += 1
console.log('%s: duplicate path %s found in {%s, %s}' % (
target, name, zip_path_dict[name],
os.path.basename(dep_jar)))
jar.close()
if zip_path_conflicts:
console.warning('%s: Found %d conflicts when packaging. '
'See %s for details.' % (
target, zip_path_conflicts, console.get_log_file()))
# TODO(wentingli): Create manifest from dependency jars later if needed
contents = 'Manifest-Version: 1.0\nCreated-By: Python.Zipfile (Blade)\n'
main_class = env.Dictionary().get('JAVAMAINCLASS')
if main_class:
contents += 'Main-Class: %s\n' % main_class
contents += '\n'
target_fat_jar.writestr(_JAR_MANIFEST, contents)
target_fat_jar.close()
return None
def generate_fat_jar(target, source, env):
target = str(target[0])
dep_jars = [str(dep) for dep in source]
# Create a new process for fatjar packaging to avoid GIL
cmd = 'PYTHONPATH=%s:$PYTHONPATH python -m fatjar %s %s' % (
blade_path, target, ' '.join(dep_jars))
p = subprocess.Popen(cmd,
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
stdout, stderr = p.communicate()
if stdout:
console.warning('%s See %s for details.' % (
stdout.rstrip(), console.get_log_file()))
if stderr:
console.log(stderr)
return p.returncode
def _generate_java_binary(target_name, onejar_path, jvm_flags, run_args):
"""generate a wrapper shell script to run jar"""
onejar_name = os.path.basename(onejar_path)
full_path = os.path.abspath(onejar_path)
target_file = open(target_name, 'w')
target_file.write("""#!/bin/sh
# Auto generated wrapper shell script by blade
jar=`dirname "$0"`/"%s"
if [ ! -f "$jar" ]; then
jar="%s"
fi
exec java %s -jar "$jar" %s $@
""" % (onejar_name, full_path, jvm_flags, run_args))
os.chmod(target_name, 0o755)
target_file.close()
return None
def generate_java_binary(target, source, env):
"""build function to generate wrapper shell script for java binary"""
target_name = str(target[0])
onejar_path = str(source[0])
return _generate_java_binary(target_name, onejar_path, '', '')
def _get_all_test_class_names_in_jar(jar):
"""Returns a list of test class names in the jar file. """
test_class_names = []
zip_file = zipfile.ZipFile(jar, 'r')
name_list = zip_file.namelist()
for name in name_list:
basename = os.path.basename(name)
# Exclude inner class and Test.class
if (basename.endswith('Test.class') and
len(basename) > len('Test.class') and
not '$' in basename):
class_name = name.replace('/', '.')[:-6] # Remove .class suffix
test_class_names.append(class_name)
zip_file.close()
return test_class_names
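# For example, a jar entry 'com/example/FooTest.class' yields the test class
# name 'com.example.FooTest'.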
def _generate_java_test_coverage_flag(env):
"""Returns java test coverage flags based on the environment passed in. """
env_dict = env.Dictionary()
jacoco_agent = env_dict.get('JACOCOAGENT')
if jacoco_agent:
jacoco_agent = os.path.abspath(jacoco_agent)
target_under_test_package = env_dict.get('JAVATARGETUNDERTESTPKG')
if target_under_test_package:
options = []
options.append('includes=%s' % ':'.join(
[p + '.*' for p in target_under_test_package if p]))
options.append('output=file')
return '-javaagent:%s=%s' % (jacoco_agent, ','.join(options))
return ''
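# When coverage is configured, the returned flag looks roughly like
# (agent path and package are illustrative):
#   -javaagent:/path/to/jacocoagent.jar=includes=com.example.*,output=file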
def _generate_java_test(target, main_class, jars, jvm_flags, run_args, env):
target_file = open(target, 'w')
target_file.write("""#!/bin/sh
# Auto generated wrapper shell script by blade
if [ -n "$BLADE_COVERAGE" ]
then
coverage_options="%s"
fi
exec java $coverage_options -classpath %s %s %s %s $@
""" % (_generate_java_test_coverage_flag(env), ':'.join(jars),
jvm_flags, main_class, run_args))
os.chmod(target, 0o755)
target_file.close()
return None
def generate_java_test(target, source, env):
"""build function to generate wrapper shell script for java test"""
target_name = str(target[0])
main_class = str(source[0])
test_jar = str(source[1])
jars = []
for jar in source[1:]:
jars.append(os.path.abspath(str(jar)))
test_class_names = _get_all_test_class_names_in_jar(test_jar)
return _generate_java_test(target_name, main_class, jars, '',
' '.join(test_class_names), env)
def _generate_scala_jar(target, sources, resources, env):
"""
Compile scala sources and generate a jar containing
the classes and resources.
"""
scalac = env['SCALAC']
java = env['JAVA']
jar = env['JAR']
options = ' '.join(env['SCALACFLAGS'])
classpath = ':'.join(env['JAVACLASSPATH'])
if not classpath:
classpath = blade_util.get_cwd()
cmd = 'JAVACMD=%s %s -d %s -classpath %s %s %s' % (java, scalac, target,
classpath, options, ' '.join(sources))
if echospawn(args=[cmd], env=os.environ, sh=None, cmd=None, escape=None):
return 1
if resources:
resources_dir = target.replace('.jar', '.resources')
if os.path.exists(resources_dir):
cmd = ['%s uf %s' % (jar, target)]
for resource in resources:
cmd.append("-C '%s' '%s'" % (resources_dir,
os.path.relpath(resource, resources_dir)))
return echospawn(args=cmd, env=os.environ, sh=None, cmd=None, escape=None)
return None
def generate_scala_jar(target, source, env):
target = str(target[0])
sources = []
index = 0
for src in source:
if str(src).endswith('.scala'):
sources.append(str(src))
index += 1
else:
break
resources = [str(src) for src in source[index:]]
return _generate_scala_jar(target, sources, resources, env)
def _generate_scala_test(target, jars, test_class_names, env):
scala, java = env['SCALA'], env['JAVA']
scala, java = os.path.abspath(scala), os.path.abspath(java)
run_args = 'org.scalatest.run ' + ' '.join(test_class_names)
script = open(target, 'w')
script.write("""#!/bin/sh
# Auto generated wrapper shell script by blade
JAVACMD=%s exec %s -classpath %s %s $@
""" % (java, scala, ':'.join(jars), run_args))
script.close()
os.chmod(target, 0o755)
return None
def generate_scala_test(target, source, env):
"""Generate wrapper shell script for scala test. """
target = str(target[0])
test_jar = str(source[0])
jars = [os.path.abspath(str(jar)) for jar in source]
test_class_names = _get_all_test_class_names_in_jar(test_jar)
return _generate_scala_test(target, jars, test_class_names, env)
def process_package_source(target, source, env):
"""Copy source file into .sources dir. """
shutil.copy2(str(source[0]), str(target[0]))
return None
def _get_tar_mode_from_suffix(suffix):
return {
'tar': 'w',
'tar.gz': 'w:gz',
'tgz': 'w:gz',
'tar.bz2': 'w:bz2',
'tbz': 'w:bz2',
}[suffix]
def _archive_package_sources(package, sources, sources_dir):
"""Archive sources into the package and return a list of source info. """
manifest = []
for s in sources:
f = str(s)
if f.startswith(sources_dir):
path = os.path.relpath(f, sources_dir)
else:
path = os.path.basename(f)
package(f, path)
manifest.append('%s %s' % (s.get_csig(), path))
return manifest
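# Each manifest line has the form '<content signature> <path in package>', e.g.
# 'd41d8cd98f00b204e9800998ecf8427e data/config.yaml' (illustrative values).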
_PACKAGE_MANIFEST = 'MANIFEST.TXT'
def _generate_tar_package(target, sources, sources_dir, suffix):
"""Generate a tar ball containing all of the source files. """
mode = _get_tar_mode_from_suffix(suffix)
tar = tarfile.open(target, mode)
manifest = _archive_package_sources(tar.add, sources, sources_dir)
manifest_path = '%s.MANIFEST' % target
with open(manifest_path, 'w') as m:
m.write('\n'.join(manifest) + '\n')
tar.add(manifest_path, _PACKAGE_MANIFEST)
tar.close()
return None
def _generate_zip_package(target, sources, sources_dir):
"""Generate a zip archive containing all of the source files. """
zip = zipfile.ZipFile(target, 'w', zipfile.ZIP_DEFLATED)
manifest = _archive_package_sources(zip.write, sources, sources_dir)
zip.writestr(_PACKAGE_MANIFEST, '\n'.join(manifest) + '\n')
zip.close()
return None
def generate_package(target, source, env):
"""Generate a package containing all of the source files. """
target = str(target[0])
sources_dir = target + '.sources'
suffix = env['PACKAGESUFFIX']
if suffix == 'zip':
return _generate_zip_package(target, source, sources_dir)
else:
return _generate_tar_package(target, source, sources_dir, suffix)
def generate_shell_test_data(target, source, env):
"""Generate test data used by shell script for subsequent execution. """
target = str(target[0])
with open(target, 'w') as testdata:
for i in range(0, len(source), 2):
testdata.write(os.path.abspath(str(source[i])) + ' ' + source[i + 1] + '\n')
return None
def generate_shell_test(target, source, env):
"""Generate a shell wrapper to run shell scripts in source one by one. """
target = str(target[0])
with open(target, 'w') as script:
script.write('#!/bin/sh\n')
script.write('# Auto generated wrapper shell script by blade\n')
script.write('set -e\n')
for s in source:
script.write('. %s' % os.path.abspath(str(s)) + '\n')
script.write('\n')
os.chmod(target, 0o755)
return None
def generate_proto_go_source(target, source, env):
"""Generate go source file by invoking protobuf compiler. """
source = source[0]
import_protos = proto_import_re.findall(source.get_text_contents())
parameters = 'import_prefix=%s/' % env['PROTOBUFGOPATH']
if import_protos:
proto_mappings = []
for proto in import_protos:
dir = os.path.dirname(proto)
name = os.path.basename(proto)
proto_mappings.append('M%s=%s' % (
proto, os.path.join(dir, name.replace('.', '_'))))
parameters += ',%s' % ','.join(proto_mappings)
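        # e.g. an imported 'common/base.proto' would be mapped to the output
        # package path 'common/base_proto' via an 'M<proto>=<path>' parameter.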
cmd = '%s --proto_path=. --plugin=protoc-gen-go=%s -I. %s -I=%s --go_out=%s:%s %s' % (
env['PROTOC'], env['PROTOCGOPLUGIN'], env['PROTOBUFINCS'],
os.path.dirname(str(source)), parameters, env['BUILDDIR'], source)
return echospawn(args=[cmd], env=os.environ, sh=None, cmd=None, escape=None)
def copy_proto_go_source(target, source, env):
"""Copy go source file generated by protobuf into go standard directory. """
shutil.copy2(str(source[0]), str(target[0]))
return None
def _generate_go_package(target, source, env):
go, go_home = env['GOCMD'], env['GOHOME']
cmd = 'GOPATH=%s %s install %s' % (go_home, go, env['GOPACKAGE'])
return echospawn(args=[cmd], env=os.environ, sh=None, cmd=None, escape=None)
def generate_go_library(target, source, env):
"""
Generate go package object. Note that the sources should be
in the same directory and the go tool compiles them as a whole
by designating the package path.
"""
return _generate_go_package(target, source, env)
def generate_go_binary(target, source, env):
"""Generate go command executable. """
return _generate_go_package(target, source, env)
def generate_go_test(target, source, env):
"""Generate go test binary. """
go, go_home = env['GOCMD'], env['GOHOME']
cmd = 'GOPATH=%s %s test -c -o %s %s' % (
go_home, go, target[0], env['GOPACKAGE'])
return echospawn(args=[cmd], env=os.environ, sh=None, cmd=None, escape=None)
def MakeAction(cmd, cmdstr):
if console.verbosity_compare(_verbosity, 'verbose') >= 0:
return SCons.Action.Action(cmd)
else:
return SCons.Action.Action(cmd, cmdstr)
_ERRORS = [': error:', ': fatal error:', ': undefined reference to',
': cannot find ', ': ld returned 1 exit status',
' is not defined'
]
_WARNINGS = [': warning:', ': note: ', '] Warning: ']
def error_colorize(message):
colored_message = []
for line in message.splitlines(True): # keepends
color = 'cyan'
# For clang column indicator, such as '^~~~~~'
if line.strip().startswith('^'):
color = 'green'
else:
for w in _WARNINGS:
if w in line:
color = 'yellow'
break
for w in _ERRORS:
if w in line:
color = 'red'
break
colored_message.append(console.colored(line, color))
return console.inerasable(''.join(colored_message))
def _echo(stdout, stderr):
"""Echo messages to stdout and stderr. """
if stdout:
sys.stdout.write(stdout)
if stderr:
sys.stderr.write(stderr)
if blade_error_log:
blade_error_log.write(stderr)
def echospawn(sh, escape, cmd, args, env):
# convert env from unicode strings
asciienv = {}
for key, value in iteritems(env):
asciienv[key] = str(value)
cmdline = ' '.join(args)
console.debug(cmdline)
p = subprocess.Popen(cmdline,
env=asciienv,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True,
universal_newlines=True)
stdout, stderr = p.communicate()
if console.verbosity_compare(_verbosity, 'verbose') < 0:
if stdout:
stdout = error_colorize(stdout)
if stderr:
stderr = error_colorize(stderr)
if p.returncode:
if p.returncode != -signal.SIGINT:
# Error
_echo(stdout, stderr)
else:
# Only warnings
_echo(stdout, stderr)
return p.returncode
def _blade_action_postfunc(closing_message):
"""To do post jobs if blade's own actions failed to build. """
console.info(closing_message)
    # Remember to write the dblite in case of re-linking after a previous
    # failed build. We should elaborate a way to avoid rebuilding after
    # failures of our own builders or actions.
SCons.SConsign.write()
def _fast_link_helper(target, source, env, link_com):
"""fast link helper function. """
target_file = str(target[0])
prefix_str = 'blade_%s' % target_file.replace('/', '_').replace('.', '_')
fd, temporary_file = tempfile.mkstemp(suffix='xianxian',
prefix=prefix_str,
dir=linking_tmp_dir)
os.close(fd)
sources = []
for s in source:
sources.append(str(s))
link_com_str = link_com.substitute(
FL_TARGET=temporary_file,
FL_SOURCE=' '.join(sources))
p = subprocess.Popen(
link_com_str,
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
std_out, std_err = p.communicate()
    if std_out:
        print(std_out)
    if std_err:
        print(std_err)
if p.returncode == 0:
shutil.move(temporary_file, target_file)
if not os.path.exists(target_file):
            console.warning('failed to generate %s in link on tmpfs mode' % target_file)
else:
_blade_action_postfunc('failed while fast linking')
return p.returncode
def fast_link_sharelib_action(target, source, env):
# $SHLINK -o $TARGET $SHLINKFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS
link_com = string.Template('%s -o $FL_TARGET %s %s $FL_SOURCE %s %s' % (
env.subst('$SHLINK'),
env.subst('$SHLINKFLAGS'),
env.subst('$__RPATH'),
env.subst('$_LIBDIRFLAGS'),
env.subst('$_LIBFLAGS')))
return _fast_link_helper(target, source, env, link_com)
def fast_link_prog_action(target, source, env):
# $LINK -o $TARGET $LINKFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS
link_com = string.Template('%s -o $FL_TARGET %s %s $FL_SOURCE %s %s' % (
env.subst('$LINK'),
env.subst('$LINKFLAGS'),
env.subst('$__RPATH'),
env.subst('$_LIBDIRFLAGS'),
env.subst('$_LIBFLAGS')))
return _fast_link_helper(target, source, env, link_com)
def setup_fast_link_prog_builder(top_env):
"""
    Set up the blade fast link Program builder. It overwrites the Program
    builder of the top level env when the user enables the fast link option,
    which is useful when blade output is placed on a distributed file system,
    where the linker's random reads and writes would largely degrade build
    performance.
"""
new_link_action = MakeAction(fast_link_prog_action, '$LINKCOMSTR')
program = SCons.Builder.Builder(action=new_link_action,
emitter='$PROGEMITTER',
prefix='$PROGPREFIX',
suffix='$PROGSUFFIX',
src_suffix='$OBJSUFFIX',
src_builder='Object',
target_scanner=SCons.Scanner.Prog.ProgramScanner())
top_env['BUILDERS']['Program'] = program
def setup_fast_link_sharelib_builder(top_env):
"""
    Set up the blade fast link SharedLibrary builder. It overwrites the
    sharelib builder of the top level env when the user enables the fast link
    option, which is useful when blade output is placed on a distributed file
    system, where the linker's random reads and writes would largely degrade
    build performance.
"""
new_link_actions = []
new_link_actions.append(SCons.Defaults.SharedCheck)
new_link_actions.append(MakeAction(fast_link_sharelib_action, '$SHLINKCOMSTR'))
sharedlib = SCons.Builder.Builder(action=new_link_actions,
emitter='$SHLIBEMITTER',
prefix='$SHLIBPREFIX',
suffix='$SHLIBSUFFIX',
target_scanner=SCons.Scanner.Prog.ProgramScanner(),
src_suffix='$SHOBJSUFFIX',
src_builder='SharedObject')
top_env['BUILDERS']['SharedLibrary'] = sharedlib
def setup_fast_link_builders(top_env):
"""Creates fast link builders - Program and SharedLibrary. """
# Check requirement
acquire_temp_place = "df | grep tmpfs | awk '{print $5, $6}'"
p = subprocess.Popen(
acquire_temp_place,
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
stdout, stderr = p.communicate()
# Do not try to overwrite builder with error
if p.returncode:
        console.warning('you have link on tmpfs enabled, '
                        'but checking for tmpfs mounts failed, so it will not be used.')
return
# No tmpfs to do fastlink, will not overwrite the builder
if not stdout:
        console.warning('you have link on tmpfs enabled, but there is no tmpfs mount to use.')
return
# Use the first one
global linking_tmp_dir
usage, linking_tmp_dir = tuple(stdout.splitlines(False)[0].split())
# Do not try to do that if there is no memory space left
usage = int(usage.replace('%', ''))
if usage > 90:
        console.warning('you have link on tmpfs enabled, '
                        'but there is not enough space left on %s to use it.' %
                        linking_tmp_dir)
return
console.info('building in link on tmpfs mode')
setup_fast_link_sharelib_builder(top_env)
setup_fast_link_prog_builder(top_env)
def make_top_env(build_dir):
"""Make the top level scons envrionment object"""
os.environ['LC_ALL'] = 'C'
top_env = SCons.Environment.Environment(ENV=os.environ)
top_env.EnsureSConsVersion(2, 0)
# Optimization options, see http://www.scons.org/wiki/GoFastButton
top_env.Decider('MD5-timestamp')
top_env.SetOption('implicit_cache', 1)
top_env.SetOption('max_drift', 1)
top_env.VariantDir(build_dir, '.', duplicate=0)
return top_env
def get_compile_source_message():
return console.erasable('%sCompiling %s$SOURCE%s%s' % (
color('cyan'), color('purple'), color('cyan'), color('end')))
def get_link_program_message():
return console.erasable('%sLinking Program %s$TARGET%s%s' % (
color('green'), color('purple'), color('green'), color('end')))
def setup_compliation_verbosity(top_env, color_enabled, verbosity):
"""Generates color and verbose message. """
console.enable_color(color_enabled)
global _verbosity
_verbosity = verbosity
top_env["SPAWN"] = echospawn
compile_source_message = get_compile_source_message()
link_program_message = get_link_program_message()
assembling_source_message = console.erasable('%sAssembling %s$SOURCE%s%s' % (
color('cyan'), color('purple'), color('cyan'), color('end')))
link_library_message = console.erasable('%sCreating Static Library %s$TARGET%s%s' % (
color('green'), color('purple'), color('green'), color('end')))
ranlib_library_message = console.erasable('%sRanlib Library %s$TARGET%s%s' % (
color('green'), color('purple'), color('green'), color('end')))
link_shared_library_message = console.erasable('%sLinking Shared Library %s$TARGET%s%s' % (
color('green'), color('purple'), color('green'), color('end')))
jar_message = console.erasable('%sCreating Jar %s$TARGET%s%s' % (
color('green'), color('purple'), color('green'), color('end')))
if console.verbosity_compare(verbosity, 'verbose') < 0:
top_env.Append(
CXXCOMSTR=compile_source_message,
CCCOMSTR=compile_source_message,
ASCOMSTR=assembling_source_message,
ASPPCOMSTR=assembling_source_message,
SHCCCOMSTR=compile_source_message,
SHCXXCOMSTR=compile_source_message,
ARCOMSTR=link_library_message,
RANLIBCOMSTR=ranlib_library_message,
SHLINKCOMSTR=link_shared_library_message,
LINKCOMSTR=link_program_message,
JAVACCOMSTR=compile_source_message,
JARCOMSTR=jar_message,
LEXCOMSTR=compile_source_message,
YACCCOMSTR=compile_source_message)
def proto_scan_func(node, env, path, arg):
contents = node.get_text_contents()
protos = proto_import_re.findall(contents)
protos += proto_import_public_re.findall(contents)
if not protos:
return []
def _find_proto(proto, path):
for dir in path:
f = os.path.join(str(dir), proto)
if os.path.exists(f):
return f
return ''
results = []
for proto in protos:
f = _find_proto(proto, path)
if f:
results.append(f)
public_protos = proto_import_public_re.findall(open(f).read())
for public_proto in public_protos:
public_proto = _find_proto(public_proto, path)
if public_proto:
results.append(public_proto)
return env.File(results)
def setup_proto_builders(top_env, build_dir, protoc_bin, protoc_java_bin,
protobuf_path, protobuf_incs_str, protobuf_java_incs,
protoc_php_plugin, protobuf_php_path, protoc_go_plugin):
# pylint: disable=too-many-locals
compile_proto_cc_message = console.erasable('%sCompiling %s$SOURCE%s to cc source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_proto_java_message = console.erasable('%sCompiling %s$SOURCE%s to java source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_proto_php_message = console.erasable('%sCompiling %s$SOURCE%s to php source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_proto_python_message = console.erasable('%sCompiling %s$SOURCE%s to python source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_proto_go_message = console.erasable('%sCompiling %s$SOURCE%s to go source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
copy_proto_go_source_message = console.erasable('%sCopying %s$SOURCE%s to go directory%s' %
(color('cyan'), color('purple'), color('cyan'), color('end')))
generate_proto_descriptor_message = console.erasable('%sGenerating proto descriptor set %s$TARGET%s%s' % (
color('green'), color('purple'), color('green'), color('end')))
proto_bld = SCons.Builder.Builder(action=MakeAction(
"%s --proto_path=. -I. %s -I=`dirname $SOURCE` --cpp_out=%s $PROTOCFLAGS $PROTOCCPPPLUGINFLAGS $SOURCE" % (
protoc_bin, protobuf_incs_str, build_dir),
compile_proto_cc_message))
top_env.Append(BUILDERS={"Proto": proto_bld})
proto_java_bld = SCons.Builder.Builder(action=MakeAction(
"%s --proto_path=. %s --java_out=%s/`dirname $SOURCE` $PROTOCJAVAPLUGINFLAGS $SOURCE" % (
protoc_java_bin, protobuf_java_incs, build_dir),
compile_proto_java_message))
top_env.Append(BUILDERS={"ProtoJava": proto_java_bld})
proto_php_bld = SCons.Builder.Builder(action=MakeAction(
"%s --proto_path=. --plugin=protoc-gen-php=%s -I. %s -I%s -I=`dirname $SOURCE` --php_out=%s/`dirname $SOURCE` $SOURCE" % (
protoc_bin, protoc_php_plugin, protobuf_incs_str, protobuf_php_path, build_dir),
compile_proto_php_message))
top_env.Append(BUILDERS={"ProtoPhp": proto_php_bld})
proto_python_bld = SCons.Builder.Builder(action=MakeAction(
"%s --proto_path=. -I. %s -I=`dirname $SOURCE` --python_out=%s $PROTOCPYTHONPLUGINFLAGS $SOURCE" % (
protoc_bin, protobuf_incs_str, build_dir),
compile_proto_python_message))
top_env.Append(BUILDERS={"ProtoPython": proto_python_bld})
proto_go_bld = SCons.Builder.Builder(action=MakeAction(
generate_proto_go_source, compile_proto_go_message),
PROTOC=protoc_bin, PROTOCGOPLUGIN=protoc_go_plugin,
PROTOBUFINCS=protobuf_incs_str, BUILDDIR=build_dir)
top_env.Append(BUILDERS={"ProtoGo": proto_go_bld})
proto_go_source_bld = SCons.Builder.Builder(
action=MakeAction(copy_proto_go_source, copy_proto_go_source_message))
top_env.Append(BUILDERS={"ProtoGoSource": proto_go_source_bld})
proto_descriptor_bld = SCons.Builder.Builder(action=MakeAction(
'%s --proto_path=. -I. %s -I=`dirname $SOURCE` '
'--descriptor_set_out=$TARGET --include_imports --include_source_info '
'$SOURCES' % (protoc_bin, protobuf_incs_str),
generate_proto_descriptor_message))
top_env.Append(BUILDERS={"ProtoDescriptors": proto_descriptor_bld})
top_env.Replace(PROTOCFLAGS="",
PROTOCCPPPLUGINFLAGS="",
PROTOCJAVAPLUGINFLAGS="",
PROTOCPYTHONPLUGINFLAGS="")
top_env.Append(PROTOPATH=['.', protobuf_path])
proto_scanner = top_env.Scanner(name='ProtoScanner',
function=proto_scan_func,
argument=None,
skeys=['.proto'],
path_function=SCons.Scanner.FindPathDirs('PROTOPATH'))
top_env.Append(SCANNERS=proto_scanner)
def setup_thrift_builders(top_env, build_dir, thrift_bin, thrift_incs_str, thrift_gen_params):
compile_thrift_cc_message = console.erasable('%sCompiling %s$SOURCE%s to cc source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_thrift_java_message = console.erasable('%sCompiling %s$SOURCE%s to java source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_thrift_python_message = console.erasable('%sCompiling %s$SOURCE%s to python source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
thrift_bld = SCons.Builder.Builder(action=MakeAction(
'%s --gen %s -I . %s -I `dirname $SOURCE`'
' -out %s/`dirname $SOURCE` $SOURCE' % (
thrift_bin, thrift_gen_params, thrift_incs_str, build_dir),
compile_thrift_cc_message))
top_env.Append(BUILDERS={"Thrift": thrift_bld})
thrift_java_bld = SCons.Builder.Builder(action=MakeAction(
"%s --gen java -I . %s -I `dirname $SOURCE` -out %s/`dirname $SOURCE` $SOURCE" % (
thrift_bin, thrift_incs_str, build_dir),
compile_thrift_java_message))
top_env.Append(BUILDERS={"ThriftJava": thrift_java_bld})
thrift_python_bld = SCons.Builder.Builder(action=MakeAction(
"%s --gen py -I . %s -I `dirname $SOURCE` -out %s/`dirname $SOURCE` $SOURCE" % (
thrift_bin, thrift_incs_str, build_dir),
compile_thrift_python_message))
top_env.Append(BUILDERS={"ThriftPython": thrift_python_bld})
def setup_fbthrift_builders(top_env, build_dir, fbthrift1_bin, fbthrift2_bin, fbthrift_incs_str):
compile_fbthrift_cpp_message = console.erasable('%sCompiling %s$SOURCE%s to cpp source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_fbthrift_cpp2_message = console.erasable('%sCompiling %s$SOURCE%s to cpp2 source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
fbthrift1_bld = SCons.Builder.Builder(action=MakeAction(
'%s --gen cpp:templates,cob_style,include_prefix,enum_strict -I . %s -I `dirname $SOURCE`'
' -o %s/`dirname $SOURCE` $SOURCE' % (
fbthrift1_bin, fbthrift_incs_str, build_dir),
compile_fbthrift_cpp_message))
top_env.Append(BUILDERS={"FBThrift1": fbthrift1_bld})
fbthrift2_bld = SCons.Builder.Builder(action=MakeAction(
'%s --gen=cpp2:cob_style,include_prefix,future -I . %s -I `dirname $SOURCE` '
'-o %s/`dirname $SOURCE` $SOURCE' % (
fbthrift2_bin, fbthrift_incs_str, build_dir),
compile_fbthrift_cpp2_message))
top_env.Append(BUILDERS={"FBThrift2": fbthrift2_bld})
def setup_cuda_builders(top_env, nvcc_str, cuda_incs_str):
nvcc_object_bld = SCons.Builder.Builder(action=MakeAction(
"%s -ccbin g++ %s $NVCCFLAGS -o $TARGET -c $SOURCE" % (nvcc_str, cuda_incs_str),
get_compile_source_message()))
top_env.Append(BUILDERS={"NvccObject": nvcc_object_bld})
nvcc_binary_bld = SCons.Builder.Builder(action=MakeAction(
"%s %s $NVCCFLAGS -o $TARGET" % (nvcc_str, cuda_incs_str),
get_link_program_message()))
top_env.Append(NVCC=nvcc_str)
top_env.Append(BUILDERS={"NvccBinary": nvcc_binary_bld})
def setup_java_builders(top_env, java_home, one_jar_boot_path):
# pylint: disable=too-many-locals
if java_home:
top_env.Replace(JAVA=os.path.join(java_home, 'bin/java'))
top_env.Replace(JAVAC=os.path.join(java_home, 'bin/javac'))
top_env.Replace(JAR=os.path.join(java_home, 'bin/jar'))
blade_jar_bld = SCons.Builder.Builder(action=MakeAction(
'jar cf $TARGET -C `dirname $SOURCE` .',
'$JARCOMSTR'))
top_env.Append(BUILDERS={"BladeJar": blade_jar_bld})
    # SCons has many bugs with generated source files, such as being unable
    # to obtain class file paths correctly, so just build all sources into
    # the jar directly.
generated_jar_bld = SCons.Builder.Builder(action=MakeAction(
'rm -fr ${TARGET}.classes && mkdir -p ${TARGET}.classes && '
'$JAVAC $JAVACFLAGS $_JAVABOOTCLASSPATH $_JAVACLASSPATH -d ${TARGET}.classes $SOURCES && '
'$JAR $JARFLAGS ${TARGET} -C ${TARGET}.classes . && '
'rm -fr ${TARGET}.classes',
'$JARCOMSTR'))
top_env.Append(BUILDERS={"GeneratedJavaJar": generated_jar_bld})
# Scons Java builder has bugs on detecting generated .class files
# produced by javac: anonymous inner classes are missing in the results
# of Java builder no matter which JAVAVERSION(1.5, 1.6) is specified
# See: http://scons.tigris.org/issues/show_bug.cgi?id=1594
# http://scons.tigris.org/issues/show_bug.cgi?id=2742
blade_java_jar_bld = SCons.Builder.Builder(action=MakeAction(
generate_java_jar, '$JARCOMSTR'))
top_env.Append(BUILDERS={"BladeJavaJar": blade_java_jar_bld})
resource_message = console.erasable('%sProcess Jar Resource %s$SOURCES%s%s' % ( \
color('cyan'), color('purple'), color('cyan'), color('end')))
java_resource_bld = SCons.Builder.Builder(
action=MakeAction(process_java_resources, resource_message))
top_env.Append(BUILDERS={"JavaResource": java_resource_bld})
source_message = console.erasable('%sProcess Java Source %s$SOURCES%s%s' % ( \
color('cyan'), color('purple'), color('cyan'), color('end')))
java_source_bld = SCons.Builder.Builder(
action=MakeAction(process_java_sources, source_message))
top_env.Append(BUILDERS={"JavaSource": java_source_bld})
global _one_jar_boot_path
_one_jar_boot_path = one_jar_boot_path
one_java_message = console.erasable('%sGenerating One Jar %s$TARGET%s%s' % ( \
color('cyan'), color('purple'), color('cyan'), color('end')))
one_jar_bld = SCons.Builder.Builder(action=MakeAction(generate_one_jar,
one_java_message))
top_env.Append(BUILDERS={'OneJar': one_jar_bld})
fat_java_message = console.erasable('%sCreating Fat Jar %s$TARGET%s%s' % ( \
color('green'), color('purple'), color('green'), color('end')))
fat_jar_bld = SCons.Builder.Builder(action=MakeAction(generate_fat_jar,
fat_java_message))
top_env.Append(BUILDERS={'FatJar': fat_jar_bld})
java_binary_message = console.erasable('%sGenerating Java Binary %s$TARGET%s%s' % \
(color('green'), color('purple'), color('green'), color('end')))
java_binary_bld = SCons.Builder.Builder(action=MakeAction(
generate_java_binary, java_binary_message))
top_env.Append(BUILDERS={"JavaBinary": java_binary_bld})
java_test_message = console.erasable('%sGenerating Java Test %s$TARGET%s%s' % \
(color('green'), color('purple'), color('green'), color('end')))
java_test_bld = SCons.Builder.Builder(action=MakeAction(
generate_java_test, java_test_message))
top_env.Append(BUILDERS={"JavaTest": java_test_bld})
def setup_scala_builders(top_env, scala_home):
if scala_home:
top_env.Replace(SCALAC=os.path.join(scala_home, 'bin/scalac'))
top_env.Replace(SCALA=os.path.join(scala_home, 'bin/scala'))
scala_jar_bld = SCons.Builder.Builder(action=MakeAction(
generate_scala_jar, '$JARCOMSTR'))
top_env.Append(BUILDERS={"ScalaJar": scala_jar_bld})
scala_test_message = console.erasable('%sGenerating Scala Test %s$TARGET%s%s' % \
(color('green'), color('purple'), color('green'), color('end')))
scala_test_bld = SCons.Builder.Builder(action=MakeAction(
generate_scala_test, scala_test_message))
top_env.Append(BUILDERS={"ScalaTest": scala_test_bld})
def setup_go_builders(top_env, go_cmd, go_home):
if go_cmd:
top_env.Replace(GOCMD=go_cmd)
if go_home:
top_env.Replace(GOHOME=go_home)
go_library_message = console.erasable('%sGenerating Go Package %s$TARGET%s%s' %
(color('green'), color('purple'), color('green'), color('end')))
go_library_builder = SCons.Builder.Builder(action=MakeAction(
generate_go_library, go_library_message))
top_env.Append(BUILDERS={"GoLibrary": go_library_builder})
go_binary_message = console.erasable('%sGenerating Go Executable %s$TARGET%s%s' %
(color('green'), color('purple'), color('green'), color('end')))
go_binary_builder = SCons.Builder.Builder(action=MakeAction(
generate_go_binary, go_binary_message))
top_env.Append(BUILDERS={"GoBinary": go_binary_builder})
go_test_message = console.erasable('%sGenerating Go Test %s$TARGET%s%s' %
(color('green'), color('purple'), color('green'), color('end')))
go_test_builder = SCons.Builder.Builder(action=MakeAction(
generate_go_test, go_test_message))
top_env.Append(BUILDERS={"GoTest": go_test_builder})
def setup_lex_yacc_builders(top_env):
top_env.Replace(LEXCOM="$LEX $LEXFLAGS -o $TARGET $SOURCES")
def setup_resource_builders(top_env):
compile_resource_index_message = console.erasable(
'%sGenerating resource index for %s$SOURCE_PATH/$TARGET_NAME%s%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_resource_message = console.erasable('%sCompiling %s$SOURCE%s as resource file%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
resource_index_bld = SCons.Builder.Builder(action=MakeAction(generate_resource_index,
compile_resource_index_message))
resource_file_bld = SCons.Builder.Builder(action=MakeAction(generate_resource_file,
compile_resource_message))
top_env.Append(BUILDERS={"ResourceIndex": resource_index_bld})
top_env.Append(BUILDERS={"ResourceFile": resource_file_bld})
def setup_python_builders(top_env):
compile_python_library_message = console.erasable('%sGenerating Python Library %s$TARGET%s%s' % \
(color('green'), color('purple'), color('green'), color('end')))
compile_python_binary_message = console.erasable('%sGenerating Python Binary %s$TARGET%s%s' % \
(color('green'), color('purple'), color('green'), color('end')))
python_library_bld = SCons.Builder.Builder(action=MakeAction(generate_python_library,
compile_python_library_message))
python_binary_bld = SCons.Builder.Builder(action=MakeAction(generate_python_binary,
compile_python_binary_message))
top_env.Append(BUILDERS={"PythonLibrary": python_library_bld})
top_env.Append(BUILDERS={"PythonBinary": python_binary_bld})
def setup_package_builders(top_env):
source_message = console.erasable('%sProcess Package Source %s$SOURCES%s%s' % (
color('cyan'), color('purple'), color('cyan'), color('end')))
source_bld = SCons.Builder.Builder(
action=MakeAction(process_package_source, source_message))
top_env.Append(BUILDERS={"PackageSource": source_bld})
package_message = console.erasable('%sCreating Package %s$TARGET%s%s' % (
color('green'), color('purple'), color('green'), color('end')))
package_bld = SCons.Builder.Builder(
action=MakeAction(generate_package, package_message))
top_env.Append(BUILDERS={"Package": package_bld})
def setup_shell_builders(top_env):
shell_test_data_message = console.erasable('%sGenerating Shell Test Data %s$TARGET%s%s' %
(color('cyan'), color('purple'), color('cyan'), color('end')))
shell_test_data_bld = SCons.Builder.Builder(action=MakeAction(
generate_shell_test_data, shell_test_data_message))
top_env.Append(BUILDERS={"ShellTestData": shell_test_data_bld})
shell_test_message = console.erasable('%sGenerating Shell Test %s$TARGET%s%s' %
(color('green'), color('purple'), color('green'), color('end')))
shell_test_bld = SCons.Builder.Builder(action=MakeAction(
generate_shell_test, shell_test_message))
top_env.Append(BUILDERS={"ShellTest": shell_test_bld})
def setup_other_builders(top_env):
setup_lex_yacc_builders(top_env)
setup_resource_builders(top_env)
setup_python_builders(top_env)
setup_package_builders(top_env)
setup_shell_builders(top_env)
def setup_swig_builders(top_env, build_dir):
compile_swig_python_message = console.erasable('%sCompiling %s$SOURCE%s to python source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_swig_java_message = console.erasable('%sCompiling %s$SOURCE%s to java source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_swig_php_message = console.erasable('%sCompiling %s$SOURCE%s to php source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
# Python
swig_py_bld = SCons.Builder.Builder(action=MakeAction(
'swig -python -threads $SWIGPYTHONFLAGS -c++ -I%s -o $TARGET $SOURCE' % (build_dir),
compile_swig_python_message))
top_env.Append(BUILDERS={"SwigPython": swig_py_bld})
# Java
swig_java_bld = SCons.Builder.Builder(action=MakeAction(
'swig -java $SWIGJAVAFLAGS -c++ -I%s -o $TARGET $SOURCE' % (build_dir),
compile_swig_java_message))
top_env.Append(BUILDERS={'SwigJava': swig_java_bld})
swig_php_bld = SCons.Builder.Builder(action=MakeAction(
'swig -php $SWIGPHPFLAGS -c++ -I%s -o $TARGET $SOURCE' % (build_dir),
compile_swig_php_message))
top_env.Append(BUILDERS={"SwigPhp": swig_php_bld})
def _exec_get_version_info(cmd, cwd):
    lc_all_env = os.environ.copy()
    lc_all_env['LC_ALL'] = 'POSIX'
p = subprocess.Popen(cmd,
env=lc_all_env,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
                         shell=True,
                         universal_newlines=True)
stdout, stderr = p.communicate()
if p.returncode:
return None
else:
return stdout.replace('"', '\\"').replace('\n', '\\n"\n"')
def _get_version_info(blade_root_dir, svn_roots):
"""Gets svn root dir info. """
svn_info_map = {}
if os.path.exists("%s/.git" % blade_root_dir):
cmd = "git log -n 1"
dirname = os.path.dirname(blade_root_dir)
version_info = _exec_get_version_info(cmd, None)
if version_info:
svn_info_map[dirname] = version_info
return svn_info_map
for root_dir in svn_roots:
root_dir_realpath = os.path.realpath(root_dir)
svn_working_dir = os.path.dirname(root_dir_realpath)
svn_dir = os.path.basename(root_dir_realpath)
cmd = 'svn info %s' % svn_dir
version_info = _exec_get_version_info(cmd, svn_working_dir)
if not version_info:
cmd = 'git ls-remote --get-url && git branch | grep "*" && git log -n 1'
version_info = _exec_get_version_info(cmd, root_dir_realpath)
if not version_info:
console.warning('Failed to get version control info in %s' % root_dir)
if version_info:
svn_info_map[root_dir] = version_info
return svn_info_map
def generate_version_file(top_env, blade_root_dir, build_dir,
profile, gcc_version, svn_roots):
"""Generate version information files. """
svn_info_map = _get_version_info(blade_root_dir, svn_roots)
svn_info_len = len(svn_info_map)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
filename = os.path.join(build_dir, 'version.c')
with open(filename, 'w') as version_c:
version_c.write('/* This file was generated by blade */\n')
version_c.write(('''
extern const int kSvnInfoCount;
extern const char* const kSvnInfo[];
extern const char kBuildType[];
extern const char kBuildTime[];
extern const char kBuilderName[];
extern const char kHostName[];
extern const char kCompiler[];
'''))
version_c.write('const int kSvnInfoCount = %d;\n' % svn_info_len)
version_c.write('const char* const kSvnInfo[%d] = {%s};\n' % (
svn_info_len, ', '.join(['"%s"' % v for v in svn_info_map.values()])))
version_c.write('const char kBuildType[] = "%s";\n' % profile)
version_c.write('const char kBuildTime[] = "%s";\n' % time.asctime())
version_c.write('const char kBuilderName[] = "%s";\n' % os.getenv('USER'))
version_c.write((
'const char kHostName[] = "%s";\n' % socket.gethostname()))
compiler = 'GCC %s' % gcc_version
version_c.write('const char kCompiler[] = "%s";\n' % compiler)
env_version = top_env.Clone()
env_version.Replace(SHCXXCOMSTR=console.erasable(
'%sUpdating version information%s' % (
color('cyan'), color('end'))))
return env_version.SharedObject(filename)
|
the-stack_0_13101 | #!/usr/bin/python
import multiprocessing
import containerstats
import etcd
import platform
import docker
import time
import os
import requests
dockerconnection = docker.Client(base_url='unix://var/run/docker.sock', timeout=2)
dockerconnection.close()
def getstats(obj):
etcd.CreateDir(DDS_ETCD_URL, platform.node() + '/' + obj.containername, DDS_CONTAINER_TTL)
etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/cpuusage',
obj.getcontainercpuusage(dockerconnection)['cpuusage'])
etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/memusage',
obj.getcontainermemusage(dockerconnection)['memusage'])
etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/memusagepercent',
obj.getcontainermemusage(dockerconnection)['memusagepercent'])
etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/netrx',
obj.getcontainernetusage(dockerconnection)['netrx'])
etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/nettx',
obj.getcontainernetusage(dockerconnection)['nettx'])
return True
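# The keys written above produce a per-host etcd layout roughly like:
#   <hostname>/<container>/cpuusage
#   <hostname>/<container>/memusage
#   <hostname>/<container>/memusagepercent
#   <hostname>/<container>/netrx
#   <hostname>/<container>/nettx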
if __name__ == '__main__':
if 'DDS_ETCD_URL' in os.environ:
DDS_ETCD_URL = os.environ['DDS_ETCD_URL']
else:
DDS_ETCD_URL = 'http://127.0.0.1:4001/v2/keys/'
if 'DDS_CONCURRENCY_LEVEL' in os.environ:
DDS_CONCURRENCY_LEVEL = os.environ['DDS_CONCURRENCY_LEVEL']
else:
DDS_CONCURRENCY_LEVEL = 8
# start values
DDS_HOST_TTL = 120
DDS_CONTAINER_TTL = 30
while True:
newpool = multiprocessing.Pool(processes=DDS_CONCURRENCY_LEVEL)
etcd.CreateDir(DDS_ETCD_URL, platform.node(), ttl=DDS_HOST_TTL)
containerlist = containerstats.getrunningcontainers(dockerconnection)
objlist = []
for container in containerlist:
objlist.append(containerstats.ContainerStats(container))
gatherstart = time.time()
        # a timeout may be raised if, e.g., a container stops while stats are being gathered
try:
newpool.map(getstats, objlist)
except requests.packages.urllib3.exceptions.ReadTimeoutError:
pass
newpool.close()
gatherstop = time.time()
gatherduration = int(gatherstop - gatherstart)
DDS_HOST_TTL = gatherduration * 5
DDS_CONTAINER_TTL = gatherduration * 3
time.sleep(gatherduration)
|
the-stack_0_13102 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Parsing routines for the ODB++ line record text format
according to the ODB++ 7.0 specification:
http://www.odb-sa.com/wp-content/uploads/ODB_Format_Description_v7.pdf
"""
from collections import defaultdict
from .Utils import readFileLines, readZIPFileLines
def filter_line_record_lines(lines):
"Remove empty and '#'-only lines from the given line list"
return [
line for line in lines
if line and line != "#"
]
def read_raw_linerecords(filename):
"Read a .Z line record file and return only important lines in order"
try: # Assume file-like object
return filter_line_record_lines(filename.read().split("\n"))
except AttributeError:
open_fn = readZIPFileLines if filename.endswith(".Z") else readFileLines
return filter_line_record_lines(
open_fn(filename))
def group_by_section(lines):
"Group a line record file by the section. Returns a dict containing lists."
groups = defaultdict(list)
name = None
for line in lines:
if line.startswith("#"):
name = line.strip("#").strip()
else:
groups[name].append(line)
return dict(groups)
def read_linerecords(filename):
"Read a linerecord file and return a dict grouped by section"
return group_by_section(read_raw_linerecords(filename))
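# Usage sketch (the file name and section key below are hypothetical):
#   sections = read_linerecords("steps/pcb/layers/top/features.Z")
#   feature_lines = sections.get("Layer features", [])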
|
the-stack_0_13104 | import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
#get_ipython().magic('matplotlib inline')
b1 = tsds.load_ozone()
df = b1.mPastData
for k in [1 , 5]:
df[b1.mTimeVar + "_" + str(k) + '_Daily'] = pd.date_range('2000-1-1', periods=df.shape[0], freq=str(k) + 'D')
#df.to_csv("outputs/ozone_WDHMS.csv");
#df.tail(10)
#df[:-10].tail()
#df[:-10:-1]
#df.describe()
for k in [1 , 5]:
for timevar in [b1.mTimeVar + "_" + str(k) + '_Daily']:
lEngine = autof.cForecastEngine()
lEngine
H = b1.mHorizon;
# lEngine.mOptions.enable_slow_mode();
# lEngine.mOptions.mDebugPerformance = True;
lEngine.mOptions.set_active_autoregressions([]);
lEngine.train(df , timevar , b1.mSignalVar, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
dfapp_in = df.copy();
dfapp_in.tail()
# H = 12
dfapp_out = lEngine.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/ozone_" + timevar + "apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[timevar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H));
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
# lEngine.standardPlots(name = "outputs/ozone_" + timevar)
|
the-stack_0_13108 | import requests
import os
import time
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
# First we want to check the robots.txt file of our objective website
# A function get_robot_txt is constructed to check any url
def get_robot_txt(url):
if url.endswith('/'):
path = url
else:
path = url + '/'
req = requests.get(path + "robots.txt", data=None)
return req.text
# Objective website
URL = "https://www.mercadolibre.com.co"
# Read robots.txt file
print('robots.txt:', get_robot_txt(URL))
sites = [
{
'country': 'Argentina',
'url': 'https://listado.mercadolibre.com.ar'
},
{
'country': 'Bolivia',
'url': 'https://listado.mercadolibre.com.bo'
},
{
'country': 'Brasil',
'url': 'https://lista.mercadolivre.com.br'
},
{
'country': 'Chile',
'url': 'https://listado.mercadolibre.cl'
},
{
'country': 'Colombia',
'url': 'https://listado.mercadolibre.com.co'
},
{
'country': 'Costa Rica',
'url': 'https://listado.mercadolibre.co.cr'
},
{
'country': 'Dominicana',
'url': 'https://listado.mercadolibre.com.do'
},
{
'country': 'Ecuador',
'url': 'https://listado.mercadolibre.com.ec'
},
{
'country': 'Guatemala',
'url': 'https://listado.mercadolibre.com.gt'
},
{
'country': 'Honduras',
'url': 'https://listado.mercadolibre.com.hn'
},
{
'country': 'México',
'url': 'https://listado.mercadolibre.com.mx'
},
{
'country': 'Nicaragua',
'url': 'https://listado.mercadolibre.com.ni'
},
{
'country': 'Panamá',
'url': 'https://listado.mercadolibre.com.pa'
},
{
'country': 'Paraguay',
'url': 'https://listado.mercadolibre.com.py'
},
{
'country': 'Perú',
'url': 'https://listado.mercadolibre.com.pe'
},
{
'country': 'El Salvador',
'url': 'https://listado.mercadolibre.com.sv'
},
{
'country': 'Uruguay',
'url': 'https://listado.mercadolibre.com.uy'
},
{
'country': 'Venezuela',
'url': 'https://listado.mercadolibre.com.ve'
},
]
products = [
{
'name': 'playstation',
'uri': 'playstation-5#D[A:playstation%205]',
},
{
'name': 'macbook pro',
'uri': 'macbook-pro-13#D[A:macbook%20pro%2013]',
},
{
'name': 'iphone',
'uri': 'iphone-11-512#D[A:iphone%2011%20512]',
},
{
'name': 'bmw s1000rr',
'uri': 'bmw-s1000rr#D[A:bmw%20s1000rr]',
},
{
'name': 'alexa echo',
'uri': 'alexa-echo-4#D[A:alexa%20echo%204]',
},
]
# Setting options for the webdriver
option = webdriver.ChromeOptions()
option.add_argument(" — incognito") # open incognito mode
# set our UserAgent name, in this case AcademicCrawler
option.add_argument("user-agent=AcademicCrawler")
# Getting current folder path
#My_path = os.path.dirname(os.path.abspath(__file__))
# Delay/Pause of download Throttling
TimeOut = 2 # sec
# Looking for the chromedriver file (Download from http://chromedriver.chromium.org/downloads)
#browser = webdriver.Chrome(executable_path=My_path + '/chromedriver', chrome_options=option)
browser = webdriver.Chrome(ChromeDriverManager().install(), options=option)
# Check if our UseraAgent is OK
agent = browser.execute_script("return navigator.userAgent")
print('agent:', agent)
def get_items_names():
elements = browser.find_elements_by_css_selector(
'#root-app > div > div > section > ol > li > div > div > div.ui-search-result__content-wrapper > div.ui-search-item__group.ui-search-item__group--title > a > h2')
if len(elements) == 0:
        print('Case 2')
elements = browser.find_elements_by_css_selector(
'#root-app > div > div.ui-search-main.ui-search-main--exhibitor.ui-search-main--only-products > section > ol > li > div > div > a > div > div.ui-search-item__group.ui-search-item__group--title > h2')
if len(elements) == 0:
            print('Case 3')
elements = browser.find_elements_by_css_selector(
'#root-app > div > div > section > ol > li > div > div > a > div > div.ui-search-item__group.ui-search-item__group--title > h2')
return elements
def get_items_prices():
prices = browser.find_elements_by_css_selector(
'.ui-search-price:not(.ui-search-price--size-x-tiny) .ui-search-price__second-line')
return prices
filename = "MercadoLibreData.csv"
current_path = os.path.dirname(os.path.abspath(__file__))
filename_path = current_path + '/' + filename
# print('current_path:', current_path)
if os.path.exists(filename_path):  # remove any previous output file
    os.remove(filename_path)
def write_file(text):
with open(filename_path, 'a') as file:
file.write(text)
file.close()
def authenticate_user():
browser.get('https://www.mercadolibre.com/jms/mco/lgz/login?platform_id=ML&go=https%3A%2F%2Fwww.mercadolibre.com.co%2F&loginType=explicit#nav-header')
browser.find_element_by_id("user_id").send_keys(os.getenv('mluser'))
browser.find_element_by_css_selector("button.andes-button > span:nth-child(1)").click()
    # not possible because the login requires a captcha and two-factor authentication
browser.find_element_by_id("password").send_keys(os.getenv('mlpass'))
try:
write_file('product,country,url,item,precio\n')
for product in products:
for site in sites:
if site['country'] == 'Colombia':
pass
#authenticate_user()
print('looking:', site['country'], ', product:', product['name'])
write_file('"' + product['name'] + '",')
write_file('"' + site['country'] + '",')
            # Get page content from the target website
url = site['url'] + '/' + product['uri']
            # measure the response time to compute a polite delay
t0 = time.time()
            # load the page in the browser
browser.get(url)
            # estimated response time in seconds
response_delay = time.time() - t0
            # wait 10x the response time before the next request
delay_time = 10 * response_delay
print('Wait for...', delay_time, 'seconds')
time.sleep(delay_time)
# Apply delay
browser.implicitly_wait(TimeOut)
write_file('"' + url + '",')
items_names = get_items_names()
items_prices = get_items_prices()
if len(items_prices) > 0:
item_name = items_names[0].text.replace('"', "&dquo;")
print('item_name:', item_name)
write_file('"' + item_name + '",')
item_price = items_prices[0].text.split("\n")[0]
print('item_price:', item_price)
write_file('"' + item_price + '"\n')
else:
write_file('"",\n')
except Exception as e:
print(e)
finally:
pass
#browser.quit()
|
the-stack_0_13109 | from typing import List, Tuple
import q_network
from q_network import Q
import numpy as np
import gym
import tools
import torch
# buffer hyperparameters
batchsize = 200 # batchsize for buffer sampling
buffer_maxlength = 1000 # max number of tuples held by buffer
episodes_til_buffer_sample = 2
buffer = tools.ReplayBuffer(buffer_maxlength) # buffer holds the memories of the exp replay
# DQL hyperparameters
steps_til_target_update = 50 # time steps for target update
num_episodes = 500 # number of episodes to run
# initialsize = 500 # initial time steps before start training - unused
gamma = .99 # discount
# tracking important things
list_of_episode_rewards = [] # records the reward per episode
q_prime_update_counter = 0 # count the number of steps taken before updating q_prime
# initialize environment
envname = "CartPole-v0"
env = gym.make(envname)
"""
obssize
Num Observation Min Max
0 Cart Position -2.4 2.4
1 Cart Velocity -Inf Inf
2 Pole Angle -41.8° 41.8°
3 Pole Velocity At Tip -Inf Inf
"""
# initialize the principal and the target Q nets
state_dim = env.observation_space.low.size
action_dim = env.action_space.n
lr = 1e-3
q_greedy: Q = Q(state_dim, action_dim, lr)
q_prime: Q = Q(state_dim, action_dim, lr)
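# The loop below relies on tools.epsilon and tools.update_q_prime, which are not
# shown in this file. A minimal sketch of what they might look like, assuming a
# linearly decaying exploration rate and that Q subclasses torch.nn.Module:
def _epsilon_sketch(episode, num_episodes, eps_start=1.0, eps_end=0.05):
    # linear decay from eps_start to eps_end over the whole run
    frac = min(float(episode) / max(num_episodes - 1, 1), 1.0)
    return eps_start + frac * (eps_end - eps_start)

def _update_q_prime_sketch(q_greedy, q_prime):
    # hard update: copy the online network's parameters into the target network
    q_prime.load_state_dict(q_greedy.state_dict())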
for episode in range(num_episodes):
# Initialize and reset environment.
s = env.reset()
d = False
reward_sum = 0
while not d:
q_vals: q_network.QValue = q_greedy.predict_state_value(state=s)
# Choose action w/ epsilon greedy approach
if np.random.rand() < tools.epsilon(episode, num_episodes):
a = torch.tensor(env.action_space.sample())
else:
a = torch.argmax(q_vals)
assert a in [0, 1]
ns, r, d, _ = env.step(int(a)) # Perform action in the env.
d_ = 1 if d else 0
experience = (s, a, r, d_, ns) # experience/memory tuple
# Append experience to the replay buffer.
buffer.append(experience)
# Shorten buffer if it's too long.
while buffer.number > buffer_maxlength:
buffer.pop()
# Training theta by gradients # train_from_buffer_sample()
if (episode % episodes_til_buffer_sample == 0
and buffer.number > batchsize):
experience_batch: List[tools.Experience] = buffer.sample(batchsize)
experience_batch: Tuple[List[torch.Tensor], List[int], List[float],
List[bool], List[torch.Tensor]
] = list(map(list, zip(*experience_batch)))
states, actions, rewards, dones, next_states = experience_batch
q_vals_ns: q_network.QValue = q_prime.predict_state_value(next_states)
max_vals = torch.max(q_vals_ns, dim=1).values # take the max along the columns
targets = torch.tensor(rewards) + torch.tensor(gamma)*max_vals
done_indices = [i for i, d in enumerate(dones) if d]
for idx in done_indices:
targets[idx] = rewards[idx]
q_greedy.train(states, actions, targets) # update_dqn_greedy()
        # 5) periodically sync the target network with the greedy network
if q_prime_update_counter % steps_til_target_update == 0:
tools.update_q_prime(q_greedy, q_prime)
        # 6) count this step toward the next target update and accumulate reward
q_prime_update_counter += 1
reward_sum += r
s = ns
list_of_episode_rewards.append(reward_sum)
tools.plot_episode_rewards(list_of_episode_rewards, "episode_rewards") |
the-stack_0_13110 | # pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
from mock import MagicMock
from .. import parse
from ..data import ComponentID, Component, Data
from ..subset import Subset
class TestParse(object):
def test_re_matches_valid_names(self):
reg = parse.TAG_RE
valid = ['{a}', '{ a }', '{A}', '{a }', '{ a}',
'{a_}', '{abc_1}', '{_abc_1}', '{1}', '{1_}']
invalid = ['', '{}', '{a ']
for v in valid:
assert reg.match(v) is not None
for i in invalid:
assert reg.match(i) is None
def test_group(self):
reg = parse.TAG_RE
assert reg.match('{a}').group('tag') == 'a'
assert reg.match('{ a }').group('tag') == 'a'
assert reg.match('{ A }').group('tag') == 'A'
assert reg.match('{ Abc_ }').group('tag') == 'Abc_'
def test_reference_list(self):
cmd = '{a} - {b} + {c}'
refs = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
expected = set([1, 2, 3])
result = set(parse._reference_list(cmd, refs))
assert expected == result
def test_reference_list_invalid_cmd(self):
with pytest.raises(KeyError) as exc:
parse._reference_list('{a}', {})
assert exc.value.args[0] == ("Tags from command not in "
"reference mapping")
def test_dereference(self):
c1 = ComponentID('c1')
c2 = ComponentID('c2')
s1 = Subset(None, label='s1')
s2 = Subset(None, label='s2')
refs = dict([('c1', c1), ('c2', c2), ('s1', s1), ('s2', s2)])
cmd = '({c1} > 10) and {s1}'
expected = ('(data[references["c1"], __view] > 10) and '
'references["s1"].to_mask(__view)')
result = parse._dereference(cmd, refs)
assert expected == result
def test_validate(self):
ref = {'a': ComponentID('ca'), 'b': ComponentID('cb')}
parse._validate('{a} + {b}', ref)
parse._validate('{a}', ref)
parse._validate('3 + 4', ref)
with pytest.raises(parse.InvalidTagError) as exc:
parse._validate('{c}', ref)
assert exc.value.args[0] == ("Tag c not in reference mapping: "
"['a', 'b']")
def test_ensure_only_component_references(self):
ref = {'a': 1, 'b': ComponentID('b')}
F = parse._ensure_only_component_references
F('{b} + 5', ref)
with pytest.raises(TypeError) as exc:
F('{b} + {a}', ref)
assert exc.value.args[0] == ("Reference to a, which is not a "
"ComponentID")
with pytest.raises(TypeError) as exc:
F('{b} + {d}', ref)
assert exc.value.args[0] == ("Reference to d, which is not a "
"ComponentID")
class TestParsedCommand(object):
def test_evaluate_component(self):
data = MagicMock()
c1 = ComponentID('c1')
data.__getitem__.return_value = 5
cmd = '{comp1} * 5'
refs = {'comp1': c1}
pc = parse.ParsedCommand(cmd, refs)
assert pc.evaluate(data) == 25
data.__getitem__.assert_called_once_with((c1, None))
def test_evaluate_subset(self):
data = Data(x=[1, 2, 3])
sub1 = data.new_subset(data.id['x'] > 1)
sub2 = data.new_subset(data.id['x'] < 3)
cmd = '{s1} & {s2}'
refs = {'s1': sub1, 's2': sub2}
pc = parse.ParsedCommand(cmd, refs)
np.testing.assert_equal(pc.evaluate(data), [0, 1, 0])
def test_evaluate_function(self):
data = MagicMock()
c1 = ComponentID('c1')
data.__getitem__.return_value = 5
cmd = 'max({comp1}, 100)'
refs = {'comp1': c1}
pc = parse.ParsedCommand(cmd, refs)
assert pc.evaluate(data) == 100
data.__getitem__.assert_called_once_with((c1, None))
def test_evaluate_math(self):
# If numpy, np, and math aren't defined in the config.py file, they
# are added to the local variables available.
data = MagicMock()
c1 = ComponentID('c1')
data.__getitem__.return_value = 10
refs = {'comp1': c1}
cmd = 'numpy.log10({comp1})'
pc = parse.ParsedCommand(cmd, refs)
assert pc.evaluate(data) == 1
cmd = 'np.log10({comp1})'
pc = parse.ParsedCommand(cmd, refs)
assert pc.evaluate(data) == 1
cmd = 'math.log10({comp1})'
pc = parse.ParsedCommand(cmd, refs)
assert pc.evaluate(data) == 1
def test_evaluate_test(self):
data = MagicMock()
c1 = ComponentID('c1')
data.__getitem__.return_value = 10
refs = {'comp1': c1}
cmd = 'numpy.log10({comp1}) + 3.4 - {comp1}'
pc = parse.ParsedCommand(cmd, refs)
pc.evaluate_test()
cmd = 'nump.log10({comp1}) + 3.4 - {comp1}'
pc = parse.ParsedCommand(cmd, refs)
with pytest.raises(NameError) as exc:
pc.evaluate_test()
assert exc.value.args[0] == "name 'nump' is not defined"
class TestParsedComponentLink(object):
def make_link(self):
data = Data()
comp = Component(np.array([1, 2, 3]))
c1 = ComponentID('c1')
c2 = ComponentID('c2')
data.add_component(comp, c1)
cmd = '{comp1} * 100'
refs = {'comp1': c1}
pc = parse.ParsedCommand(cmd, refs)
cl = parse.ParsedComponentLink(c2, pc)
data.add_component_link(cl)
return data, c2
def test(self):
data, cid = self.make_link()
result = data[cid]
expected = np.array([100, 200, 300])
np.testing.assert_array_equal(result, expected)
def test_not_identity(self):
# regression test
d = Data(x=[1, 2, 3])
c2 = ComponentID('c2')
cmd = '{x}'
refs = {'x': d.id['x']}
pc = parse.ParsedCommand(cmd, refs)
link = parse.ParsedComponentLink(c2, pc)
assert not link.identity
def test_slice(self):
data, cid = self.make_link()
result = data[cid, ::2]
np.testing.assert_array_equal(result, [100, 300])
def test_save_load(self):
from .test_state import clone
d = Data(x=[1, 2, 3])
c2 = ComponentID('c2')
cmd = '{x} + 1'
refs = {'x': d.id['x']}
pc = parse.ParsedCommand(cmd, refs)
link = parse.ParsedComponentLink(c2, pc)
d.add_component_link(link)
d2 = clone(d)
np.testing.assert_array_equal(d2['c2'], [2, 3, 4])
class TestParsedSubsetState(object):
def setup_method(self, method):
data = Data(g=[2, 4, 6, 8])
s1 = data.new_subset()
s2 = data.new_subset()
s1.subset_state = np.array([1, 1, 1, 0], dtype=bool)
s2.subset_state = np.array([0, 1, 1, 1], dtype=bool)
self.refs = {'s1': s1, 's2': s2, 'g': data.id['g']}
self.data = data
def test_two_subset(self):
cmd = '{s1} & {s2}'
s = self.data.new_subset()
p = parse.ParsedCommand(cmd, self.refs)
state = parse.ParsedSubsetState(p)
s.subset_state = state
result = s.to_mask()
expected = np.array([0, 1, 1, 0], dtype=bool)
np.testing.assert_array_equal(result, expected)
def test_two_subset_and_component(self):
cmd = '{s1} & {s2} & ({g} < 6)'
s = self.data.new_subset()
p = parse.ParsedCommand(cmd, self.refs)
state = parse.ParsedSubsetState(p)
s.subset_state = state
result = s.to_mask()
expected = np.array([0, 1, 0, 0], dtype=bool)
np.testing.assert_array_equal(result, expected)
|
the-stack_0_13111 | # -*- coding: utf-8 -*-
"""
XForm Survey element classes for different question types.
"""
import os.path
import re
from pyxform.errors import PyXFormError
from pyxform.question_type_dictionary import QUESTION_TYPE_DICT
from pyxform.survey_element import SurveyElement
from pyxform.utils import (
basestring,
node,
unicode,
default_is_dynamic,
has_dynamic_label,
)
class Question(SurveyElement):
def validate(self):
SurveyElement.validate(self)
# make sure that the type of this question exists in the
# question type dictionary.
if self.type not in QUESTION_TYPE_DICT:
raise PyXFormError("Unknown question type '%s'." % self.type)
def xml_instance(self, **kwargs):
survey = self.get_root()
attributes = {}
attributes.update(self.get("instance", {}))
for key, value in attributes.items():
attributes[key] = survey.insert_xpaths(value, self)
if self.get("default") and not default_is_dynamic(self.default, self.type):
return node(self.name, unicode(self.get("default")), **attributes)
return node(self.name, **attributes)
def xml_control(self):
if self.type == "calculate" or (
("calculate" in self.bind or self.trigger) and not (self.label or self.hint)
):
nested_setvalues = self.get_root().get_setvalues_for_question_name(
self.name
)
if nested_setvalues:
for setvalue in nested_setvalues:
msg = (
"The question ${%s} is not user-visible so it can't be used as a calculation trigger for question ${%s}."
% (self.name, setvalue[0])
)
raise PyXFormError(msg)
return None
xml_node = self.build_xml()
if xml_node:
self.nest_setvalues(xml_node)
return xml_node
def nest_setvalues(self, xml_node):
nested_setvalues = self.get_root().get_setvalues_for_question_name(self.name)
if nested_setvalues:
for setvalue in nested_setvalues:
setvalue_attrs = {
"ref": self.get_root()
.insert_xpaths("${%s}" % setvalue[0], self.get_root())
.strip(),
"event": "xforms-value-changed",
}
if not (setvalue[1] == ""):
setvalue_attrs["value"] = self.get_root().insert_xpaths(
setvalue[1], self
)
setvalue_node = node("setvalue", **setvalue_attrs)
xml_node.appendChild(setvalue_node)
def build_xml(self):
return None
class InputQuestion(Question):
"""
This control string is the same for: strings, integers, decimals,
dates, geopoints, barcodes ...
"""
def build_xml(self):
control_dict = self.control
label_and_hint = self.xml_label_and_hint()
survey = self.get_root()
# Resolve field references in attributes
for key, value in control_dict.items():
control_dict[key] = survey.insert_xpaths(value, self)
control_dict["ref"] = self.get_xpath()
result = node(**control_dict)
if label_and_hint:
for element in self.xml_label_and_hint():
result.appendChild(element)
# Input types are used for selects with external choices sheets.
if self["query"]:
choice_filter = self.get("choice_filter")
query = "instance('" + self["query"] + "')/root/item"
choice_filter = survey.insert_xpaths(choice_filter, self, True)
if choice_filter:
query += "[" + choice_filter + "]"
result.setAttribute("query", query)
return result
class TriggerQuestion(Question):
def build_xml(self):
control_dict = self.control
survey = self.get_root()
# Resolve field references in attributes
for key, value in control_dict.items():
control_dict[key] = survey.insert_xpaths(value, self)
control_dict["ref"] = self.get_xpath()
return node("trigger", *self.xml_label_and_hint(), **control_dict)
class UploadQuestion(Question):
def _get_media_type(self):
return self.control["mediatype"]
def build_xml(self):
control_dict = self.control
survey = self.get_root()
# Resolve field references in attributes
for key, value in control_dict.items():
control_dict[key] = survey.insert_xpaths(value, self)
control_dict["ref"] = self.get_xpath()
control_dict["mediatype"] = self._get_media_type()
return node("upload", *self.xml_label_and_hint(), **control_dict)
class Option(SurveyElement):
def xml_value(self):
return node("value", self.name)
def xml(self):
item = node("item")
self.xml_label()
item.appendChild(self.xml_label())
item.appendChild(self.xml_value())
return item
def validate(self):
pass
class MultipleChoiceQuestion(Question):
def __init__(self, **kwargs):
kwargs_copy = kwargs.copy()
# Notice that choices can be specified under choices or children.
# I'm going to try to stick to just choices.
# Aliases in the json format will make it more difficult
# to use going forward.
choices = list(kwargs_copy.pop("choices", [])) + list(
kwargs_copy.pop("children", [])
)
Question.__init__(self, **kwargs_copy)
for choice in choices:
self.add_choice(**choice)
def add_choice(self, **kwargs):
option = Option(**kwargs)
self.add_child(option)
def validate(self):
Question.validate(self)
descendants = self.iter_descendants()
next(descendants) # iter_descendants includes self; we need to pop it
for choice in descendants:
choice.validate()
def build_xml(self):
assert self.bind["type"] in ["string", "odk:rank"]
survey = self.get_root()
control_dict = self.control.copy()
# Resolve field references in attributes
for key, value in control_dict.items():
control_dict[key] = survey.insert_xpaths(value, self)
control_dict["ref"] = self.get_xpath()
result = node(**control_dict)
for element in self.xml_label_and_hint():
result.appendChild(element)
choices = survey.get("choices")
multi_language = False
if choices is not None and len(choices) > 0:
first_choices = next(iter(choices.values()))
multi_language = isinstance(first_choices[0].get("label"), dict)
# itemset are only supposed to be strings,
# check to prevent the rare dicts that show up
if self["itemset"] and isinstance(self["itemset"], basestring):
choice_filter = self.get("choice_filter")
itemset_value_ref = "name"
itemset, file_extension = os.path.splitext(self["itemset"])
has_media = False
has_dyn_label = False
is_previous_question = bool(re.match(r"^\${.*}$", self.get("itemset")))
if choices.get(itemset):
has_media = bool(choices[itemset][0].get("media"))
has_dyn_label = has_dynamic_label(choices[itemset], multi_language)
if file_extension in [".csv", ".xml"]:
itemset = itemset
itemset_label_ref = "label"
else:
if not multi_language and not has_media and not has_dyn_label:
itemset = self["itemset"]
itemset_label_ref = "label"
else:
itemset = self["itemset"]
itemset_label_ref = "jr:itext(itextId)"
choice_filter = survey.insert_xpaths(
choice_filter, self, True, is_previous_question
)
if is_previous_question:
path = (
survey.insert_xpaths(self["itemset"], self, reference_parent=True)
.strip()
.split("/")
)
nodeset = "/".join(path[:-1])
itemset_value_ref = path[-1]
itemset_label_ref = path[-1]
if choice_filter:
choice_filter = choice_filter.replace(
"current()/" + nodeset, "."
).replace(nodeset, ".")
else:
# Choices must have a value. Filter out repeat instances without
# an answer for the linked question
name = path[-1]
choice_filter = f"./{name} != ''"
else:
nodeset = "instance('" + itemset + "')/root/item"
if choice_filter:
nodeset += "[" + choice_filter + "]"
if self["parameters"]:
params = self["parameters"]
if "randomize" in params and params["randomize"] == "true":
nodeset = "randomize(" + nodeset
if "seed" in params:
if params["seed"].startswith("${"):
nodeset = (
nodeset
+ ", "
+ survey.insert_xpaths(params["seed"], self).strip()
)
else:
nodeset = nodeset + ", " + params["seed"]
nodeset += ")"
itemset_children = [
node("value", ref=itemset_value_ref),
node("label", ref=itemset_label_ref),
]
result.appendChild(node("itemset", *itemset_children, nodeset=nodeset))
else:
for child in self.children:
result.appendChild(child.xml())
return result
class SelectOneQuestion(MultipleChoiceQuestion):
def __init__(self, **kwargs):
super(SelectOneQuestion, self).__init__(**kwargs)
self._dict[self.TYPE] = "select one"
class Tag(SurveyElement):
def __init__(self, **kwargs):
kwargs_copy = kwargs.copy()
choices = kwargs_copy.pop("choices", []) + kwargs_copy.pop("children", [])
super(Tag, self).__init__(**kwargs_copy)
if choices:
self.children = []
for choice in choices:
option = Option(**choice)
self.add_child(option)
def xml(self):
result = node("tag", key=self.name)
self.xml_label()
result.appendChild(self.xml_label())
for choice in self.children:
result.appendChild(choice.xml())
return result
def validate(self):
pass
class OsmUploadQuestion(UploadQuestion):
def __init__(self, **kwargs):
kwargs_copy = kwargs.copy()
tags = kwargs_copy.pop("tags", []) + kwargs_copy.pop("children", [])
super(OsmUploadQuestion, self).__init__(**kwargs_copy)
if tags:
self.children = []
for tag in tags:
self.add_tag(**tag)
def add_tag(self, **kwargs):
tag = Tag(**kwargs)
self.add_child(tag)
def build_xml(self):
control_dict = self.control
control_dict["ref"] = self.get_xpath()
control_dict["mediatype"] = self._get_media_type()
result = node("upload", *self.xml_label_and_hint(), **control_dict)
for osm_tag in self.children:
result.appendChild(osm_tag.xml())
return result
class RangeQuestion(Question):
def build_xml(self):
control_dict = self.control
label_and_hint = self.xml_label_and_hint()
survey = self.get_root()
# Resolve field references in attributes
for key, value in control_dict.items():
control_dict[key] = survey.insert_xpaths(value, self)
control_dict["ref"] = self.get_xpath()
params = self.get("parameters", {})
control_dict.update(params)
result = node(**control_dict)
if label_and_hint:
for element in self.xml_label_and_hint():
result.appendChild(element)
return result
|
the-stack_0_13115 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module provides an interface between the previous Pod
API and outputs a kubernetes.client.models.V1Pod.
The advantage being that the full Kubernetes API
is supported and no serialization need be written.
"""
import copy
import kubernetes.client.models as k8s
from airflow.executors import Executors
import uuid
class PodDefaults:
"""
Static defaults for the PodGenerator
"""
XCOM_MOUNT_PATH = '/airflow/xcom'
SIDECAR_CONTAINER_NAME = 'airflow-xcom-sidecar'
XCOM_CMD = """import time
while True:
try:
time.sleep(3600)
except KeyboardInterrupt:
exit(0)
"""
VOLUME_MOUNT = k8s.V1VolumeMount(
name='xcom',
mount_path=XCOM_MOUNT_PATH
)
VOLUME = k8s.V1Volume(
name='xcom',
empty_dir=k8s.V1EmptyDirVolumeSource()
)
SIDECAR_CONTAINER = k8s.V1Container(
name=SIDECAR_CONTAINER_NAME,
command=['python', '-c', XCOM_CMD],
image='python:3.5-alpine',
volume_mounts=[VOLUME_MOUNT]
)
class PodGenerator:
"""
Contains Kubernetes Airflow Worker configuration logic
Represents a kubernetes pod and manages execution of a single pod.
:param image: The docker image
:type image: str
:param envs: A dict containing the environment variables
:type envs: Dict[str, str]
:param cmds: The command to be run on the pod
:type cmds: List[str]
:param secrets: Secrets to be launched to the pod
:type secrets: List[airflow.kubernetes.models.secret.Secret]
:param image_pull_policy: Specify a policy to cache or always pull an image
:type image_pull_policy: str
:param image_pull_secrets: Any image pull secrets to be given to the pod.
If more than one secret is required, provide a comma separated list:
secret_a,secret_b
:type image_pull_secrets: str
:param affinity: A dict containing a group of affinity scheduling rules
:type affinity: dict
:param hostnetwork: If True enable host networking on the pod
:type hostnetwork: bool
:param tolerations: A list of kubernetes tolerations
:type tolerations: list
:param security_context: A dict containing the security context for the pod
:type security_context: dict
:param configmaps: Any configmap refs to envfrom.
If more than one configmap is required, provide a comma separated list
configmap_a,configmap_b
:type configmaps: str
:param dnspolicy: Specify a dnspolicy for the pod
:type dnspolicy: str
:param pod: The fully specified pod.
:type pod: kubernetes.client.models.V1Pod
"""
def __init__(
self,
image,
name=None,
namespace=None,
volume_mounts=None,
envs=None,
cmds=None,
args=None,
labels=None,
node_selectors=None,
ports=None,
volumes=None,
image_pull_policy='IfNotPresent',
restart_policy='Never',
image_pull_secrets=None,
init_containers=None,
service_account_name=None,
resources=None,
annotations=None,
affinity=None,
hostnetwork=False,
tolerations=None,
security_context=None,
configmaps=None,
dnspolicy=None,
pod=None,
extract_xcom=False,
):
self.ud_pod = pod
self.pod = k8s.V1Pod()
self.pod.api_version = 'v1'
self.pod.kind = 'Pod'
# Pod Metadata
self.metadata = k8s.V1ObjectMeta()
self.metadata.labels = labels
self.metadata.name = name + "-" + str(uuid.uuid4())[:8] if name else None
self.metadata.namespace = namespace
self.metadata.annotations = annotations
# Pod Container
self.container = k8s.V1Container(name='base')
self.container.image = image
self.container.env = []
if envs:
if isinstance(envs, dict):
for key, val in envs.items():
self.container.env.append(k8s.V1EnvVar(
name=key,
value=val
))
elif isinstance(envs, list):
self.container.env.extend(envs)
configmaps = configmaps or []
self.container.env_from = []
for configmap in configmaps:
self.container.env_from.append(k8s.V1EnvFromSource(
config_map_ref=k8s.V1ConfigMapEnvSource(
name=configmap
)
))
self.container.command = cmds or []
self.container.args = args or []
self.container.image_pull_policy = image_pull_policy
self.container.ports = ports or []
self.container.resources = resources
self.container.volume_mounts = volume_mounts or []
# Pod Spec
self.spec = k8s.V1PodSpec(containers=[])
self.spec.security_context = security_context
self.spec.tolerations = tolerations
self.spec.dns_policy = dnspolicy
self.spec.host_network = hostnetwork
self.spec.affinity = affinity
self.spec.service_account_name = service_account_name
self.spec.init_containers = init_containers
self.spec.volumes = volumes or []
self.spec.node_selector = node_selectors
self.spec.restart_policy = restart_policy
self.spec.image_pull_secrets = []
if image_pull_secrets:
for image_pull_secret in image_pull_secrets.split(','):
self.spec.image_pull_secrets.append(k8s.V1LocalObjectReference(
name=image_pull_secret
))
# Attach sidecar
self.extract_xcom = extract_xcom
def gen_pod(self) -> k8s.V1Pod:
result = self.ud_pod
if result is None:
result = self.pod
result.spec = self.spec
result.metadata = self.metadata
result.spec.containers = [self.container]
if self.extract_xcom:
result = self.add_sidecar(result)
return result
@staticmethod
def add_sidecar(pod: k8s.V1Pod) -> k8s.V1Pod:
pod_cp = copy.deepcopy(pod)
pod_cp.spec.volumes.insert(0, PodDefaults.VOLUME)
pod_cp.spec.containers[0].volume_mounts.insert(0, PodDefaults.VOLUME_MOUNT)
pod_cp.spec.containers.append(PodDefaults.SIDECAR_CONTAINER)
return pod_cp
@staticmethod
def from_obj(obj) -> k8s.V1Pod:
if obj is None:
return k8s.V1Pod()
if isinstance(obj, PodGenerator):
return obj.gen_pod()
if not isinstance(obj, dict):
raise TypeError(
'Cannot convert a non-dictionary or non-PodGenerator '
'object into a KubernetesExecutorConfig')
namespaced = obj.get(Executors.KubernetesExecutor, {})
resources = namespaced.get('resources')
if resources is None:
requests = {
'cpu': namespaced.get('request_cpu'),
'memory': namespaced.get('request_memory')
}
limits = {
'cpu': namespaced.get('limit_cpu'),
'memory': namespaced.get('limit_memory')
}
all_resources = list(requests.values()) + list(limits.values())
if all(r is None for r in all_resources):
resources = None
else:
resources = k8s.V1ResourceRequirements(
requests=requests,
limits=limits
)
annotations = namespaced.get('annotations', {})
gcp_service_account_key = namespaced.get('gcp_service_account_key', None)
if annotations is not None and gcp_service_account_key is not None:
annotations.update({
'iam.cloud.google.com/service-account': gcp_service_account_key
})
pod_spec_generator = PodGenerator(
image=namespaced.get('image'),
envs=namespaced.get('env'),
cmds=namespaced.get('cmds'),
args=namespaced.get('args'),
labels=namespaced.get('labels'),
node_selectors=namespaced.get('node_selectors'),
name=namespaced.get('name'),
ports=namespaced.get('ports'),
volumes=namespaced.get('volumes'),
volume_mounts=namespaced.get('volume_mounts'),
namespace=namespaced.get('namespace'),
image_pull_policy=namespaced.get('image_pull_policy'),
restart_policy=namespaced.get('restart_policy'),
image_pull_secrets=namespaced.get('image_pull_secrets'),
init_containers=namespaced.get('init_containers'),
service_account_name=namespaced.get('service_account_name'),
resources=resources,
annotations=namespaced.get('annotations'),
affinity=namespaced.get('affinity'),
hostnetwork=namespaced.get('hostnetwork'),
tolerations=namespaced.get('tolerations'),
security_context=namespaced.get('security_context'),
configmaps=namespaced.get('configmaps'),
dnspolicy=namespaced.get('dnspolicy'),
pod=namespaced.get('pod'),
extract_xcom=namespaced.get('extract_xcom'),
)
return pod_spec_generator.gen_pod()
@staticmethod
def reconcile_pods(base_pod: k8s.V1Pod, client_pod: k8s.V1Pod) -> k8s.V1Pod:
"""
:param base_pod: has the base attributes which are overwritten if they exist
in the client pod and remain if they do not exist in the client_pod
:type base_pod: k8s.V1Pod
:param client_pod: the pod that the client wants to create.
:type client_pod: k8s.V1Pod
:return: the merged pods
This can't be done recursively as certain fields are preserved,
some overwritten, and some concatenated, e.g. The command
should be preserved from base, the volumes appended to and
the other fields overwritten.
"""
client_pod_cp = copy.deepcopy(client_pod)
def merge_objects(base_obj, client_obj):
for base_key in base_obj.to_dict().keys():
base_val = getattr(base_obj, base_key, None)
if not getattr(client_obj, base_key, None) and base_val:
setattr(client_obj, base_key, base_val)
def extend_object_field(base_obj, client_obj, field_name):
base_obj_field = getattr(base_obj, field_name, None)
client_obj_field = getattr(client_obj, field_name, None)
if not base_obj_field:
return
if not client_obj_field:
setattr(client_obj, field_name, base_obj_field)
return
appended_fields = base_obj_field + client_obj_field
setattr(client_obj, field_name, appended_fields)
# Values at the pod and metadata should be overwritten where they exist,
# but certain values at the spec and container level must be conserved.
base_container = base_pod.spec.containers[0]
client_container = client_pod_cp.spec.containers[0]
extend_object_field(base_container, client_container, 'volume_mounts')
extend_object_field(base_container, client_container, 'env')
extend_object_field(base_container, client_container, 'env_from')
extend_object_field(base_container, client_container, 'ports')
extend_object_field(base_container, client_container, 'volume_devices')
client_container.command = base_container.command
client_container.args = base_container.args
merge_objects(base_pod.spec.containers[0], client_pod_cp.spec.containers[0])
# Just append any additional containers from the base pod
client_pod_cp.spec.containers.extend(base_pod.spec.containers[1:])
merge_objects(base_pod.metadata, client_pod_cp.metadata)
extend_object_field(base_pod.spec, client_pod_cp.spec, 'volumes')
merge_objects(base_pod.spec, client_pod_cp.spec)
merge_objects(base_pod, client_pod_cp)
return client_pod_cp
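# Minimal usage sketch; the image, names and env values below are illustrative
# placeholders rather than Airflow defaults. An equivalent configuration can be
# expressed as an executor_config-style dict (keys such as request_cpu,
# limit_memory and annotations namespaced under the KubernetesExecutor entry)
# and converted with PodGenerator.from_obj.
if __name__ == '__main__':
    example_pod = PodGenerator(
        image='python:3.7-slim',
        name='example-task',
        namespace='default',
        envs={'EXAMPLE_ENV': 'value'},
        cmds=['python', '-c', 'print("hello")'],
        labels={'app': 'airflow-worker'},
    ).gen_pod()
    print(example_pod.metadata.name)  # e.g. example-task-1a2b3c4d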
|
the-stack_0_13116 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import json
from dataclasses import dataclass, field
from json import JSONEncoder
from typing import Set, Dict, Union, List, Tuple
from sqlalchemy.orm import Session
from typing_extensions import TypeAlias
from .queries import get_warning_message_range
from .sarif_types import SARIFResult
from .ui.issues import IssueQueryResult
SARIFOutput: TypeAlias = Dict[
str,
Union[
List[
Dict[
str,
Union[
Dict[str, Dict[str, Union[List[Dict[str, str]], str]]],
List[SARIFResult],
],
]
],
str,
],
]
@dataclass
class SARIF:
version: str = "2.1.0"
schema: str = "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json" # noqa
_tool_warning_code_ranges: Dict[str, Tuple[int, int]] = field(default_factory=dict)
driver: Dict[str, Union[str, List[Dict[str, str]]]] = field(default_factory=dict)
results: List[SARIFResult] = field(default_factory=list)
def __init__(
self, tool: str, session: Session, filtered_issues: Set[IssueQueryResult]
) -> None:
self._tool_warning_code_ranges = {
"mariana-trench": (4000, 5000),
"pysa": (5000, 6000),
}
driver_json = {}
if tool == "pysa":
driver_json["name"] = "Pysa"
driver_json["informationUri"] = "https://github.com/facebook/pyre-check/"
tool_warning_messages = get_warning_message_range(
session,
self._tool_warning_code_ranges[tool][0],
self._tool_warning_code_ranges[tool][1],
)
rules_json = []
for rule in tool_warning_messages:
rules_json.append({"id": str(rule.code), "name": rule.message})
driver_json["rules"] = rules_json
else:
raise NotImplementedError
self.driver = driver_json
self.results = [issue.to_sarif() for issue in filtered_issues]
def to_json(self, indent: int = 2) -> str:
return json.dumps(self, cls=SARIFEncoder, indent=indent)
class SARIFEncoder(JSONEncoder):
def default(self, o: SARIF) -> SARIFOutput:
return {
"version": o.version,
"$schema": o.schema,
"runs": [
{
"tool": {"driver": o.driver},
"results": o.results,
}
],
}
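# Usage sketch, assuming a SQLAlchemy `session` bound to a SAPP database and a
# set of IssueQueryResult objects returned by an issues query:
#
#     sarif_log = SARIF("pysa", session, filtered_issues)
#     with open("results.sarif", "w") as fh:
#         fh.write(sarif_log.to_json(indent=2))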
|
the-stack_0_13117 | #! /usr/bin/env py.test
import time
from mwlib import myjson as json
from mwlib import serve
def mkcolldir(tmpdir, name):
cid = serve.make_collection_id({'metabook': json.dumps({'title': name, "type": "collection"})})
d = tmpdir.join(cid[0], cid[:2], cid).ensure(dir=1)
d.join("output.rl").write("bla")
return d
def test_purge_cache(tmpdir):
d1 = mkcolldir(tmpdir, 'c1')
d2 = mkcolldir(tmpdir, 'c2')
d2.join("output.rl").setmtime(time.time() - 2)
serve.purge_cache(1, tmpdir.strpath)
assert d1.check()
assert not d2.check()
|
the-stack_0_13118 | import tensorflow as tf
from tqdm import tqdm
from ..texts._text_functions import str_idx, build_dataset
class Model:
def __init__(
self,
size_layer = 128,
num_layers = 1,
embedded_size = 128,
dict_size = 5000,
learning_rate = 1e-3,
output_size = 300,
dropout = 0.8,
):
def cells(size, reuse = False):
cell = tf.nn.rnn_cell.LSTMCell(
size, initializer = tf.orthogonal_initializer(), reuse = reuse
)
return tf.contrib.rnn.DropoutWrapper(
cell, output_keep_prob = dropout
)
def birnn(inputs, scope):
with tf.variable_scope(scope):
for n in range(num_layers):
(out_fw, out_bw), (
state_fw,
state_bw,
) = tf.nn.bidirectional_dynamic_rnn(
cell_fw = cells(size_layer // 2),
cell_bw = cells(size_layer // 2),
inputs = inputs,
dtype = tf.float32,
scope = 'bidirectional_rnn_%d' % (n),
)
inputs = tf.concat((out_fw, out_bw), 2)
return tf.layers.dense(inputs[:, -1], output_size)
self.X_left = tf.placeholder(tf.int32, [None, None])
self.X_right = tf.placeholder(tf.int32, [None, None])
self.Y = tf.placeholder(tf.float32, [None])
self.batch_size = tf.shape(self.X_left)[0]
encoder_embeddings = tf.Variable(
tf.random_uniform([dict_size, embedded_size], -1, 1)
)
embedded_left = tf.nn.embedding_lookup(encoder_embeddings, self.X_left)
embedded_right = tf.nn.embedding_lookup(
encoder_embeddings, self.X_right
)
def contrastive_loss(y, d):
tmp = y * tf.square(d)
tmp2 = (1 - y) * tf.square(tf.maximum((1 - d), 0))
return (
tf.reduce_sum(tmp + tmp2)
/ tf.cast(self.batch_size, tf.float32)
/ 2
)
self.output_left = birnn(embedded_left, 'left')
self.output_right = birnn(embedded_right, 'right')
self.distance = tf.sqrt(
tf.reduce_sum(
tf.square(tf.subtract(self.output_left, self.output_right)),
1,
keepdims = True,
)
)
self.distance = tf.div(
self.distance,
tf.add(
tf.sqrt(
tf.reduce_sum(
tf.square(self.output_left), 1, keepdims = True
)
),
tf.sqrt(
tf.reduce_sum(
tf.square(self.output_right), 1, keepdims = True
)
),
),
)
self.distance = tf.reshape(self.distance, [-1])
self.cost = contrastive_loss(self.Y, self.distance)
self.temp_sim = tf.subtract(
tf.ones_like(self.distance), tf.rint(self.distance)
)
correct_predictions = tf.equal(self.temp_sim, self.Y)
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, 'float'))
self.optimizer = tf.train.AdamOptimizer(
learning_rate = learning_rate
).minimize(self.cost)
def train_model(
train_X_left,
train_X_right,
train_Y,
epoch = 10,
batch_size = 16,
embedding_size = 256,
output_size = 300,
maxlen = 100,
dropout = 0.8,
num_layers = 1,
**kwargs
):
concat = (' '.join(train_X_left + train_X_right)).split()
vocabulary_size = len(list(set(concat)))
_, _, dictionary, reversed_dictionary = build_dataset(
concat, vocabulary_size
)
_graph = tf.Graph()
with _graph.as_default():
sess = tf.InteractiveSession()
model = Model(
size_layer = embedding_size,
num_layers = num_layers,
embedded_size = embedding_size,
dict_size = len(dictionary),
output_size = output_size,
dropout = dropout,
)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.trainable_variables())
vectors_left = str_idx(train_X_left, dictionary, maxlen, UNK = 3)
vectors_right = str_idx(train_X_right, dictionary, maxlen, UNK = 3)
for e in range(epoch):
pbar = tqdm(
range(0, len(vectors_left), batch_size), desc = 'minibatch loop'
)
for i in pbar:
batch_x_left = vectors_left[
i : min(i + batch_size, len(vectors_left))
]
batch_x_right = vectors_right[
i : min(i + batch_size, len(vectors_left))
]
batch_y = train_Y[i : min(i + batch_size, len(vectors_left))]
acc, loss, _ = sess.run(
[model.accuracy, model.cost, model.optimizer],
feed_dict = {
model.X_left: batch_x_left,
model.X_right: batch_x_right,
model.Y: batch_y,
},
)
pbar.set_postfix(cost = loss, accuracy = acc)
return sess, model, dictionary, saver, dropout
def load_siamese(location, json):
graph = tf.Graph()
with graph.as_default():
model = Model(
size_layer = json['embedding_size'],
num_layers = json['num_layers'],
embedded_size = json['embedding_size'],
dict_size = len(json['dictionary']),
output_size = json['output_size'],
dropout = json['dropout'],
)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.trainable_variables())
saver.restore(sess, location + '/model.ckpt')
return sess, model, saver
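# Usage sketch with placeholder sentence pairs and tiny hyperparameters, for
# illustration only (in practice these helpers are driven by malaya's public
# API, and the relative imports above assume package context):
if __name__ == '__main__':
    left = ['saya suka makan ayam', 'kerajaan malaysia']
    right = ['saya suka makan ikan', 'kerajaan singapura']
    labels = [1, 0]
    sess, model, dictionary, saver, dropout = train_model(
        left, right, labels, epoch = 2, batch_size = 2, maxlen = 20
    )
    # score a new pair: smaller model.distance means more similar
    x_left = str_idx(['saya suka ayam'], dictionary, 20, UNK = 3)
    x_right = str_idx(['saya suka ikan'], dictionary, 20, UNK = 3)
    print(sess.run(model.distance, feed_dict = {model.X_left: x_left,
                                                model.X_right: x_right}))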
|
the-stack_0_13121 | """
Union of Features
==========================
This module contains steps to perform various feature unions and model stacking, using parallelism is possible.
..
Copyright 2019, Neuraxio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
..
Thanks to Umaneo Technologies Inc. for their contributions to this Machine Learning
project, visit https://www.umaneo.com/ for more information on Umaneo Technologies Inc.
"""
from joblib import Parallel, delayed
from neuraxle.base import BaseStep, TruncableSteps, NamedTupleList, Identity, ExecutionContext, DataContainer, \
NonFittableMixin, ForceHandleOnlyMixin
from neuraxle.steps.numpy import NumpyConcatenateInnerFeatures
class FeatureUnion(ForceHandleOnlyMixin, TruncableSteps):
"""Parallelize the union of many pipeline steps."""
def __init__(
self,
steps_as_tuple: NamedTupleList,
joiner: NonFittableMixin = NumpyConcatenateInnerFeatures(),
n_jobs: int = None,
backend: str = "threading",
cache_folder_when_no_handle: str = None
):
"""
Create a feature union.
:param steps_as_tuple: the NamedTupleList of steps to process in parallel and to join.
:param joiner: What will be used to join the features. For example, ``NumpyConcatenateInnerFeatures()``.
:param n_jobs: The number of jobs for the parallelized ``joblib.Parallel`` loop in fit and in transform.
:param backend: The type of parallelization to do with ``joblib.Parallel``. Possible values: "loky", "multiprocessing", "threading", "dask" if you use dask, and more.
"""
steps_as_tuple.append(('joiner', joiner))
TruncableSteps.__init__(self, steps_as_tuple)
self.n_jobs = n_jobs
self.backend = backend
ForceHandleOnlyMixin.__init__(self, cache_folder=cache_folder_when_no_handle)
def _fit_data_container(self, data_container, context):
"""
Fit the parallel steps on the data. It will make use of some parallel processing.
:param data_container: The input data to fit onto
:param context: execution context
:return: self
"""
# Actually fit:
if self.n_jobs != 1:
fitted_steps = Parallel(backend=self.backend, n_jobs=self.n_jobs)(
delayed(step.handle_fit)(data_container.copy(), context)
for _, step in self.steps_as_tuple[:-1]
)
else:
fitted_steps = [
step.handle_fit(data_container.copy(), context)
for _, step in self.steps_as_tuple[:-1]
]
self._save_fitted_steps(fitted_steps)
return self
def _transform_data_container(self, data_container, context):
"""
Transform the data with the unions. It will make use of some parallel processing.
:param data_container: data container
:param context: execution context
:return: the transformed data_inputs.
"""
if self.n_jobs != 1:
data_containers = Parallel(backend=self.backend, n_jobs=self.n_jobs)(
delayed(step.handle_transform)(data_container.copy(), context)
for _, step in self.steps_as_tuple[:-1]
)
else:
data_containers = [
step.handle_transform(data_container.copy(), context)
for _, step in self.steps_as_tuple[:-1]
]
return DataContainer(
data_inputs=data_containers,
current_ids=data_container.current_ids,
summary_id=data_container.summary_id,
expected_outputs=data_container.expected_outputs,
sub_data_containers=data_container.sub_data_containers
)
def _did_transform(self, data_container, context):
data_container = self[-1].handle_transform(data_container, context)
return data_container
def _fit_transform_data_container(self, data_container, context):
"""
Transform the data with the unions. It will make use of some parallel processing.
:param data_container: data container
:param context: execution context
:return: the transformed data_inputs.
"""
new_self = self._fit_data_container(data_container, context)
data_container = self._transform_data_container(data_container, context)
return new_self, data_container
def _save_fitted_steps(self, fitted_steps):
# Save fitted steps
for i, fitted_step in enumerate(fitted_steps[:-1]):
self.steps_as_tuple[i] = (self.steps_as_tuple[i][0], fitted_step)
self._refresh_steps()
def _did_fit_transform(self, data_container, context):
data_container = self[-1].handle_transform(data_container, context)
return data_container
class AddFeatures(FeatureUnion):
"""Parallelize the union of many pipeline steps AND concatenate the new features to the received inputs using Identity."""
def __init__(self, steps_as_tuple: NamedTupleList, **kwargs):
"""
Create a ``FeatureUnion`` where ``Identity`` is the first step so as to also keep
the inputs to concatenate them to the outputs.
:param steps_as_tuple: The steps to be sent to the ``FeatureUnion``. ``Identity()`` is prepended.
:param kwargs: Other arguments to send to ``FeatureUnion``.
"""
FeatureUnion.__init__(self, [Identity()] + steps_as_tuple, **kwargs)
class ModelStacking(FeatureUnion):
"""Performs a ``FeatureUnion`` of steps, and then send the joined result to the above judge step."""
def __init__(
self,
steps_as_tuple: NamedTupleList,
judge: BaseStep,
**kwargs
):
"""
Perform model stacking. The steps will be merged with a FeatureUnion,
and the judge will recombine the predictions.
:param steps_as_tuple: the NamedTupleList of steps to process in parallel and to join.
:param judge: a BaseStep that will learn to judge the best answer and who to trust out of every parallel steps.
:param kwargs: Other arguments to send to ``FeatureUnion``.
"""
FeatureUnion.__init__(self, steps_as_tuple, **kwargs)
self.judge: BaseStep = judge # TODO: add "other" types of step(s) to TuncableSteps or to another intermediate class. For example, to get their hyperparameters.
def _did_fit_transform(self, data_container, context) -> ('BaseStep', DataContainer):
data_container = super()._did_fit_transform(data_container, context)
fitted_judge, data_container = self.judge.handle_fit_transform(data_container, context)
self.judge = fitted_judge
return data_container
def _did_fit(self, data_container: DataContainer, context: ExecutionContext) -> DataContainer:
"""
Fit the parallel steps on the data. It will make use of some parallel processing.
Also, fit the judge on the result of the parallel steps.
:param data_container: data container to fit on
:param context: execution context
:return: self
"""
data_container = super()._did_fit(data_container, context)
data_container = super()._transform_data_container(data_container, context)
data_container = super()._did_transform(data_container, context)
fitted_judge = self.judge.handle_fit(data_container, context)
self.judge = fitted_judge
return data_container
def _did_transform(self, data_container, context) -> DataContainer:
"""
Transform the data with the unions. It will make use of some parallel processing.
Then, use the judge to refine the transformations.
:param data_container: data container to transform
:param context: execution context
"""
data_container = super()._did_transform(data_container, context)
results = self.judge.handle_transform(data_container, context)
data_container.set_data_inputs(results.data_inputs)
return data_container
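# Minimal usage sketch; the PCA/FastICA steps, component counts and random input
# matrix are illustrative placeholders:
if __name__ == '__main__':
    import numpy as np
    from sklearn.decomposition import PCA, FastICA
    from neuraxle.pipeline import Pipeline
    from neuraxle.steps.sklearn import SKLearnWrapper

    p = Pipeline([
        FeatureUnion([
            SKLearnWrapper(PCA(n_components=2)),
            SKLearnWrapper(FastICA(n_components=2)),
        ], joiner=NumpyConcatenateInnerFeatures(), n_jobs=1)
    ])
    p, outputs = p.fit_transform(np.random.rand(10, 4))
    print(outputs.shape)  # expected (10, 4): 2 PCA + 2 ICA columns concatenated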
|
the-stack_0_13122 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Functions to interact with Arrow memory allocated by Arrow Java.
These functions convert the objects holding the metadata, the actual
data is not copied at all.
This will only work with a JVM running in the same process such as provided
through jpype. Modules that talk to a remote JVM like py4j will not work as the
memory addresses reported by them are not reachable in the python process.
"""
import pyarrow as pa
def jvm_buffer(arrowbuf):
"""
Construct an Arrow buffer from io.netty.buffer.ArrowBuf
Parameters
----------
arrowbuf: io.netty.buffer.ArrowBuf
Arrow Buffer representation on the JVM
Returns
-------
pyarrow.Buffer
Python Buffer that references the JVM memory
"""
address = arrowbuf.memoryAddress()
size = arrowbuf.capacity()
return pa.foreign_buffer(address, size, arrowbuf.asNettyBuffer())
def _from_jvm_int_type(jvm_type):
"""
Convert a JVM int type to its Python equivalent.
Parameters
----------
jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$Int
Returns
-------
typ: pyarrow.DataType
"""
bit_width = jvm_type.getBitWidth()
if jvm_type.getIsSigned():
if bit_width == 8:
return pa.int8()
elif bit_width == 16:
return pa.int16()
elif bit_width == 32:
return pa.int32()
elif bit_width == 64:
return pa.int64()
else:
if bit_width == 8:
return pa.uint8()
elif bit_width == 16:
return pa.uint16()
elif bit_width == 32:
return pa.uint32()
elif bit_width == 64:
return pa.uint64()
def _from_jvm_float_type(jvm_type):
"""
Convert a JVM float type to its Python equivalent.
Parameters
----------
jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$FloatingPoint
Returns
-------
typ: pyarrow.DataType
"""
precision = jvm_type.getPrecision().toString()
if precision == 'HALF':
return pa.float16()
elif precision == 'SINGLE':
return pa.float32()
elif precision == 'DOUBLE':
return pa.float64()
def _from_jvm_time_type(jvm_type):
"""
Convert a JVM time type to its Python equivalent.
Parameters
----------
jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$Time
Returns
-------
typ: pyarrow.DataType
"""
time_unit = jvm_type.getUnit().toString()
if time_unit == 'SECOND':
assert jvm_type.getBitWidth() == 32
return pa.time32('s')
elif time_unit == 'MILLISECOND':
assert jvm_type.getBitWidth() == 32
return pa.time32('ms')
elif time_unit == 'MICROSECOND':
assert jvm_type.getBitWidth() == 64
return pa.time64('us')
elif time_unit == 'NANOSECOND':
assert jvm_type.getBitWidth() == 64
return pa.time64('ns')
def _from_jvm_timestamp_type(jvm_type):
"""
Convert a JVM timestamp type to its Python equivalent.
Parameters
----------
jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$Timestamp
Returns
-------
typ: pyarrow.DataType
"""
time_unit = jvm_type.getUnit().toString()
timezone = jvm_type.getTimezone()
if timezone is not None:
timezone = str(timezone)
if time_unit == 'SECOND':
return pa.timestamp('s', tz=timezone)
elif time_unit == 'MILLISECOND':
return pa.timestamp('ms', tz=timezone)
elif time_unit == 'MICROSECOND':
return pa.timestamp('us', tz=timezone)
elif time_unit == 'NANOSECOND':
return pa.timestamp('ns', tz=timezone)
def _from_jvm_date_type(jvm_type):
"""
Convert a JVM date type to its Python equivalent
Parameters
----------
jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$Date
Returns
-------
typ: pyarrow.DataType
"""
day_unit = jvm_type.getUnit().toString()
if day_unit == 'DAY':
return pa.date32()
elif day_unit == 'MILLISECOND':
return pa.date64()
def field(jvm_field):
"""
Construct a Field from a org.apache.arrow.vector.types.pojo.Field
instance.
Parameters
----------
jvm_field: org.apache.arrow.vector.types.pojo.Field
Returns
-------
pyarrow.Field
"""
name = str(jvm_field.getName())
jvm_type = jvm_field.getType()
typ = None
if not jvm_type.isComplex():
type_str = jvm_type.getTypeID().toString()
if type_str == 'Null':
typ = pa.null()
elif type_str == 'Int':
typ = _from_jvm_int_type(jvm_type)
elif type_str == 'FloatingPoint':
typ = _from_jvm_float_type(jvm_type)
elif type_str == 'Utf8':
typ = pa.string()
elif type_str == 'Binary':
typ = pa.binary()
elif type_str == 'FixedSizeBinary':
typ = pa.binary(jvm_type.getByteWidth())
elif type_str == 'Bool':
typ = pa.bool_()
elif type_str == 'Time':
typ = _from_jvm_time_type(jvm_type)
elif type_str == 'Timestamp':
typ = _from_jvm_timestamp_type(jvm_type)
elif type_str == 'Date':
typ = _from_jvm_date_type(jvm_type)
elif type_str == 'Decimal':
typ = pa.decimal128(jvm_type.getPrecision(), jvm_type.getScale())
else:
raise NotImplementedError(
"Unsupported JVM type: {}".format(type_str))
else:
# TODO: The following JVM types are not implemented:
# Struct, List, FixedSizeList, Union, Dictionary
raise NotImplementedError(
"JVM field conversion only implemented for primitive types.")
nullable = jvm_field.isNullable()
jvm_metadata = jvm_field.getMetadata()
if jvm_metadata.isEmpty():
metadata = None
else:
metadata = {str(entry.getKey()): str(entry.getValue())
for entry in jvm_metadata.entrySet()}
return pa.field(name, typ, nullable, metadata)
def schema(jvm_schema):
"""
Construct a Schema from a org.apache.arrow.vector.types.pojo.Schema
instance.
Parameters
----------
jvm_schema: org.apache.arrow.vector.types.pojo.Schema
Returns
-------
pyarrow.Schema
"""
fields = jvm_schema.getFields()
fields = [field(f) for f in fields]
jvm_metadata = jvm_schema.getCustomMetadata()
if jvm_metadata.isEmpty():
metadata = None
else:
metadata = {str(entry.getKey()): str(entry.getValue())
for entry in jvm_metadata.entrySet()}
return pa.schema(fields, metadata)
def array(jvm_array):
"""
Construct an (Python) Array from its JVM equivalent.
Parameters
----------
jvm_array : org.apache.arrow.vector.ValueVector
Returns
-------
array : Array
"""
if jvm_array.getField().getType().isComplex():
minor_type_str = jvm_array.getMinorType().toString()
raise NotImplementedError(
"Cannot convert JVM Arrow array of type {},"
" complex types not yet implemented.".format(minor_type_str))
dtype = field(jvm_array.getField()).type
length = jvm_array.getValueCount()
buffers = [jvm_buffer(buf)
for buf in list(jvm_array.getBuffers(False))]
null_count = jvm_array.getNullCount()
return pa.Array.from_buffers(dtype, length, buffers, null_count)
def record_batch(jvm_vector_schema_root):
"""
Construct a (Python) RecordBatch from a JVM VectorSchemaRoot
Parameters
----------
jvm_vector_schema_root : org.apache.arrow.vector.VectorSchemaRoot
Returns
-------
record_batch: pyarrow.RecordBatch
"""
pa_schema = schema(jvm_vector_schema_root.getSchema())
arrays = []
for name in pa_schema.names:
arrays.append(array(jvm_vector_schema_root.getVector(name)))
return pa.RecordBatch.from_arrays(
arrays,
pa_schema.names,
metadata=pa_schema.metadata
)
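# Usage sketch, assuming jpype starts the JVM inside this Python process so the
# buffer addresses above are directly reachable; the classpath is a placeholder
# for the Arrow Java jars:
#
#     import jpype
#     jpype.startJVM(classpath=["/path/to/arrow/java/jars/*"])
#     # `root` is an org.apache.arrow.vector.VectorSchemaRoot filled by Java code
#     batch = record_batch(root)
#     table = pa.Table.from_batches([batch])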
|
the-stack_0_13123 | #!/usr/bin/python
import json
import tarfile
import time
import os
import shutil
import sys
import requests
import tempfile
import contextlib
import re
from requests.auth import HTTPBasicAuth
from subprocess import Popen, PIPE
class Export:
"""
This is a generic EuPathDB export tool for Galaxy. It is abstract and so must be subclassed by more
specialized export tools that implement those abstract classes.
"""
# Names for the 2 json files and the folder containing the dataset to be included in the tarball
DATASET_JSON = "dataset.json"
META_JSON = "meta.json"
DATAFILES = "datafiles"
def __init__(self, dataset_type, version, validation_script, args):
"""
Initializes the export class with the parameters needed to accomplish the export of user
datasets on Galaxy to EuPathDB projects.
:param dataset_type: The EuPathDB type of this dataset
:param version: The version of the EuPathDB type of this dataset
:param validation_script: A script that handles the validation of this dataset
:param args: An array of the input parameters
"""
self._type = dataset_type
self._version = version
self._validation_script = validation_script
# Extract and transform the parameters as needed into member variables
self.parse_params(args)
# This msec timestamp is used to denote both the created and modified times.
self._timestamp = int(time.time() * 1000)
# This is the name of the file to be exported sans extension. It will be used to designate a unique temporary
# directory and to export both the tarball and the flag that triggers IRODS to process the tarball. By
# convention, the dataset tarball is of the form dataset_uNNNNNN_tNNNNNNN.tgz where the NNNNNN following the _u
# is the WDK user id and _t is the msec timestamp
self._export_file_root = 'dataset_u' + str(self._user_id) + '_t' + str(self._timestamp) + '_p' + str(os.getpid())
print >> sys.stdout, "Export file root is " + self._export_file_root
# Set up the configuration data
(self._url, self._user, self._pwd, self._lz_coll, self._flag_coll) = self.collect_rest_data()
def parse_params(self, args):
"""
Salts away all generic parameters (i.e., the first 5 params) and do some initial validation. The subclasses
will handle the other parameters.
:param args:
:return:
"""
if len(args) < 6:
raise ValidationException("The tool was passed an insufficient numbers of arguments.")
self._dataset_name = args[0]
self._summary = args[1]
self._description = args[2]
# WDK user id is derived from the user email
user_email = args[3].strip()
if not re.match(r'.+\.\[email protected]$', user_email, flags=0):
raise ValidationException(
"The user email " + str(user_email) + " is not valid for the use of this tool.")
galaxy_user = user_email.split("@")[0]
self._user_id = galaxy_user[galaxy_user.rfind(".") + 1:]
# Used to find the configuration file containing IRODS url and credentials
self._tool_directory = args[4]
# Output file
self._output = args[5]
def collect_rest_data(self):
"""
Obtains the url and credentials and relevant collections needed to run the iRODS rest service.
At some point, this information should be fished out of a configuration file.
:return: A tuple containing the url, user, and password, landing zone and flags collection,
in that order
"""
config_path = self._tool_directory + "/../../config/config.json"
# The tool directory path seems glitchy on Globus Dev Galaxy instance after service restarts.
# Uncomment to check.
#print >> sys.stdout, "self._tool_directory is " + self._tool_directory
with open(config_path, "r+") as config_file:
config_json = json.load(config_file)
return (config_json["url"], config_json["user"], config_json["password"], "lz", "flags")
def validate_datasets(self):
"""
Runs the validation script provided to the class upon initialization using the user's
dataset files as standard input.
:return:
"""
        if self._validation_script is None:
return
dataset_files = self.identify_dataset_files()
validation_process = Popen(['python', self._tool_directory + "/../../bin/" + self._validation_script],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
# output is a tuple containing (stdout, stderr)
output = validation_process.communicate(json.dumps(dataset_files))
if validation_process.returncode == 1:
raise ValidationException(output[1])
def identify_dependencies(self):
"""
An abstract method to be addressed by a specialized export tool that furnishes a dependency json list.
:return: The dependency json list to be returned should look as follows:
[dependency1, dependency2, ... ]
where each dependency is written as a json object as follows:
{
"resourceIdentifier": <value>,
"resourceVersion": <value>,
"resourceDisplayName": <value
}
Where no dependencies exist, an empty list is returned
"""
raise NotImplementedError(
"The method 'identify_dependencies(self)' needs to be implemented in the specialized export module.")
def identify_projects(self):
"""
An abstract method to be addressed by a specialized export tool that furnishes a EuPathDB project list.
:return: The project list to be returned should look as follows:
[project1, project2, ... ]
At least one valid EuPathDB project must be listed
"""
raise NotImplementedError(
"The method 'identify_project(self)' needs to be implemented in the specialized export module.")
def identify_supported_projects(self):
"""
Override this method to provide a non-default list of projects.
Default is None, interpreted as all projects are ok, ie, no constraints.
"""
        return None
def identify_dataset_files(self):
"""
An abstract method to be addressed by a specialized export tool that furnishes a json list
containing the dataset data files and the EuPath file names they must have in the tarball.
:return: The dataset file list to be returned should look as follows:
[dataset file1, dataset file2, ... ]
where each dataset file is written as a json object as follows:
{
"name":<filename that EuPathDB expects>,
"path":<Galaxy path to the dataset file>
        }
        At least one valid EuPathDB dataset file must be listed
"""
raise NotImplementedError(
"The method 'identify_dataset_file(self)' needs to be implemented in the specialized export module.")
def create_dataset_json_file(self, temp_path):
""" Create and populate the dataset.json file that must be included in the tarball."""
# Get the total size of the dataset files (needed for the json file)
size = sum(os.stat(dataset_file['path']).st_size for dataset_file in self.identify_dataset_files())
if self.identify_supported_projects() != None:
for (project) in self.identify_projects():
if project not in self.identify_supported_projects():
raise ValidationException("Sorry, you cannot export this kind of data to " + project)
dataset_path = temp_path + "/" + self.DATASET_JSON
with open(dataset_path, "w+") as json_file:
json.dump({
"type": {"name": self._type, "version": self._version},
"dependencies": self.identify_dependencies(),
"projects": self.identify_projects(),
"dataFiles": self.create_data_file_metadata(),
"owner": self._user_id,
"size": size,
"created": self._timestamp
}, json_file, indent=4)
def create_metadata_json_file(self, temp_path):
"""" Create and populate the meta.json file that must be included in the tarball."""
meta_path = temp_path + "/" + self.META_JSON
with open(meta_path, "w+") as json_file:
json.dump({"name": self._dataset_name,
"summary": self._summary,
"description": self._description
}, json_file, indent=4)
def create_data_file_metadata(self):
"""
Create a json object holding metadata for an array of dataset files.
:return: json object to be inserted into dataset.json
"""
dataset_files_metadata = []
for dataset_file in self.identify_dataset_files():
dataset_file_metadata = {}
dataset_file_metadata["name"] = re.sub(r"\s+", "_", dataset_file['name'])
dataset_file_metadata["size"] = os.stat(dataset_file['path']).st_size
dataset_files_metadata.append(dataset_file_metadata)
return dataset_files_metadata
def package_data_files(self, temp_path):
"""
Copies the user's dataset files to the datafiles folder of the temporary dir and changes each
dataset filename conferred by Galaxy to a filename expected by EuPathDB
"""
os.mkdir(temp_path + "/" + self.DATAFILES)
for dataset_file in self.identify_dataset_files():
shutil.copy(dataset_file['path'], temp_path + "/" + self.DATAFILES + "/" + re.sub(r"\s+", "_", dataset_file['name']))
def create_tarball(self):
"""
Package the tarball - contains meta.json, dataset.json and a datafiles folder containing the
user's dataset files
"""
with tarfile.open(self._export_file_root + ".tgz", "w:gz") as tarball:
for item in [self.META_JSON, self.DATASET_JSON, self.DATAFILES]:
tarball.add(item)
def process_request(self, collection, source_file):
"""
        This method wraps the iRODS rest request in a try/catch to ensure that bad responses are
        reflected back to the user.
:param collection: the name of the workspaces collection to which the file is to be uploaded
:param source_file: the name of the file to be uploaded to iRODS
"""
rest_response = self.send_request(collection, source_file)
try:
rest_response.raise_for_status()
except requests.exceptions.HTTPError as e:
print >> sys.stderr, "Error: " + str(e)
sys.exit(1)
def send_request(self, collection, source_file):
"""
This request is intended as a multi-part form post containing one file to be uploaded. iRODS Rest
        does an iput followed by an iget, apparently. So the response can be used to ensure proper
delivery.
:param collection: the name of the workspaces collection to which the file is to be uploaded
:param source_file: the name of the file to be uploaded to iRODS
:return: the http response from an iget of the uploaded file
"""
request = self._url + collection + "/" + source_file
headers = {"Accept": "application/json"}
upload_file = {"uploadFile": open(source_file, "rb")}
auth = HTTPBasicAuth(self._user, self._pwd)
try:
response = requests.post(request, auth=auth, headers=headers, files=upload_file)
response.raise_for_status()
except Exception as e:
print >> sys.stderr, "Error: The dataset export could not be completed at this time. The EuPathDB" \
" workspace may be unavailable presently. " + str(e)
sys.exit(2)
return response
def get_flag(self, collection, source_file):
"""
This method picks up any flag (success or failure) from the flags collection in iRODs related to the dataset
exported to determine whether the export was successful. If not, the nature of the failure is reported to the
user. The failure report will normally be very general unless the problem is one that can possibly be remedied
by the user (e.g., going over quota).
:param collection: The iRODS collection holding the status flags
:param source_file: The dataset tarball name sans extension
"""
time.sleep(5) # arbitrary wait period before one time check for a flag.
auth = HTTPBasicAuth(self._user, self._pwd)
# Look for the presence of a success flag first and if none found, check for a failure flag. If neither
# found, assume that to be a failure also.
try:
request = self._url + collection + "/" + "success_" + source_file
success = requests.get(request, auth=auth, timeout=5)
if success.status_code == 404:
request = self._url + collection + "/" + "failure_" + source_file
failure = requests.get(request, auth=auth, timeout=5)
if failure.status_code != 404:
raise TransferException(failure.content)
else:
failure.raise_for_status()
else:
self.output_success()
print >> sys.stdout, "Your dataset has been successfully exported to EuPathDB."
print >> sys.stdout, "Please visit an appropriate EuPathDB site to view your dataset."
except (requests.exceptions.ConnectionError, TransferException) as e:
print >> sys.stderr, "Error: " + str(e)
sys.exit(1)
def connection_diagnostic(self):
"""
        Used to ensure that the calling IP is the one expected (i.e., the one for which the
        firewall is opened). In the Globus Dev Galaxy instance, calling the tool outside of Galaxy
        versus inside Galaxy resulted in different calling IP addresses.
"""
request = "http://ifconfig.co"
headers = {"Accept": "application/json"}
try:
response = requests.get(request, headers=headers)
response.raise_for_status()
print >> sys.stdout, "Diagnostic Result: " + response.content
except Exception as e:
print >> sys.stderr, "Diagnostic Error: " + str(e)
def export(self):
"""
Does the work of exporting to EuPathDB, a tarball consisting of the user's dataset files along
with dataset and metadata json files.
"""
# Apply the validation first. If it fails, exit with a data error.
self.validate_datasets()
# We need to save the current working directory so we can get back to it when we are
# finished working in our temporary directory.
orig_path = os.getcwd()
# We need to create a temporary directory in which to assemble the tarball.
with self.temporary_directory(self._export_file_root) as temp_path:
# Need to temporarily work inside the temporary directory to properly construct and
# send the tarball
os.chdir(temp_path)
self.package_data_files(temp_path)
self.create_metadata_json_file(temp_path)
self.create_dataset_json_file(temp_path)
self.create_tarball()
# Uncomment to check the calling ip address for this tool.
# self.connection_diagnostic()
# Call the iRODS rest service to drop the tarball into the iRODS workspace landing zone
self.process_request(self._lz_coll, self._export_file_root + ".tgz")
            # Create an empty (flag) file corresponding to the tarball
open(self._export_file_root + ".txt", "w").close()
# Call the iRODS rest service to drop a flag into the IRODS workspace flags collection. This flag
# triggers the iRODS PEP that unpacks the tarball and posts the event to Jenkins
self.process_request(self._flag_coll, self._export_file_root + ".txt")
# Look for a success/fail indication from IRODS.
self.get_flag(self._flag_coll, self._export_file_root)
# We exit the temporary directory prior to removing it, back to the original working directory.
os.chdir(orig_path)
@contextlib.contextmanager
def temporary_directory(self, dir_name):
"""
This method creates a temporary directory such that removal is assured once the
program completes.
:param dir_name: The name of the temporary directory
:return: The full path to the temporary directory
"""
temp_path = tempfile.mkdtemp(dir_name)
try:
yield temp_path
finally:
# Added the boolean arg because cannot remove top level of temp dir structure in
# Globus Dev Galaxy instance and it will throw an Exception if the boolean, 'True', is not in place.
shutil.rmtree(temp_path, True)
def output_success(self):
header = "<html><body><h1>Good news!</h1><br />"
msg = """
<h2>Results of the EuPathDB Export Tool<br />Bigwig Files to EuPathDB</h2>
<h3>Your set of bigwig files was exported from Galaxy to your account in EuPathDB.
For file access and to view in GBrowse, go to My Data Sets in the appropriate EuPathDB site:
</h3><br />
        Go to the appropriate EuPathDB site (links below) to see it (and all your User Datasets):<br />
<a href='http://amoebadb.org/amoeba/app/workspace/datasets'>AmoebaDB</a><br />
<a href='http://cryptodb.org/cryptodb/app/workspace/datasets'>CryptoDB</a><br />
<a href='http://fungidb.org/fungidb/app/workspace/datasets'>FungiDB</a><br />
<a href='http://giardiadb.org/giardiadb/app/workspace/datasets'>GiardiaDB</a><br />
<a href='http://hostdb.org/hostdb/app/workspace/datasets'>HostDB</a><br />
<a href='http://microsporidiadb.org/micro/app/workspace/datasets'>MicrosporidiaDB</a><br />
<a href='http://piroplasmadb.org/piro/app/workspace/datasets'>PiroplasmaDB</a><br />
<a href='http://plasmodb.org/plasmo/app/workspace/datasets'>PlasmoDB</a><br />
<a href='http://schistodb.net/schisto/app/workspace/datasets'>SchistoDB</a><br />
<a href='http://toxodb.org/toxo/app/workspace/datasets'>ToxoDB</a><br />
<a href='http://trichdb.org/trichdb/app/workspace/datasets'>TrichDB</a><br />
<a href='http://tritrypdb.org/tritrypdb/app/workspace/datasets'>TriTrypDB</a><br />
</body></html>
"""
with open(self._output, 'w') as file:
file.write("%s%s" % (header,msg))
class ValidationException(Exception):
"""
This represents the exception reported when a call to a validation script returns a data error.
"""
pass
class TransferException(Exception):
"""
This represents the exception reported when the export of a dataset to the iRODS system returns a failure.
"""
pass
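# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original tool): a minimal concrete
# subclass wiring the abstract methods together. The type/version strings, the
# project name, the exported file name and the extra argument index are all
# hypothetical.
class _ExampleExport(Export):
    def __init__(self, args):
        Export.__init__(self, "example", "1.0", None, args)
        # Hypothetical extra parameter: path of the single file to export.
        self._dataset_file_path = args[6]
    def identify_dependencies(self):
        return []
    def identify_projects(self):
        return ["PlasmoDB"]
    def identify_dataset_files(self):
        return [{"name": "example.txt", "path": self._dataset_file_path}]
# A concrete tool would then typically run:
#     _ExampleExport(sys.argv[1:]).export()
# ---------------------------------------------------------------------------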
|
the-stack_0_13124 | # ---------------------------------------------------------------------
# Iskratel.MSAN.get_interface_status
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinterfacestatus import IGetInterfaceStatus
class Script(BaseScript):
name = "Iskratel.MSAN.get_interface_status"
interface = IGetInterfaceStatus
rx_port = re.compile(
r"^(?P<port>\d+/\d+)\s+(?:\s+|PC|PC Mbr)\s+"
r"(?P<admin_status>Enable|Disable)\s+"
r"(?:Auto|1000 Full)\s+"
r"(?:\s+|Auto|100 Full|1000 Full)\s+"
r"(?P<oper_status>Up|Down)\s+(?:Enable|Disable)\s+"
r"(?:Enable|Disable)(?P<descr>.*?)?\n",
re.MULTILINE,
)
def execute_cli(self, interface=None):
r = []
for match in self.rx_port.finditer(self.cli("show port all")):
if (interface is not None) and (interface == match.group("port")):
return [
{
"interface": match.group("port"),
"status": match.group("oper_status") != "Down",
}
]
r += [
{"interface": match.group("port"), "status": match.group("oper_status") != "Down"}
]
return r
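# Illustrative note (not taken from device documentation): rx_port above is
# written to match "show port all" rows of roughly this shape, where the
# concrete values are invented for illustration only:
#
#   0/1      Enable  Auto           Up      Enable  Enable
#
# Such a row would be parsed as port="0/1", admin_status="Enable",
# oper_status="Up".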
|
the-stack_0_13125 | """
File for test case generation
"""
import time
from typing import List
import kubernetes as k8s
from illuminatio.k8s_util import labels_to_string
from illuminatio.rule import Rule
from illuminatio.test_case import NetworkTestCase
from illuminatio.host import ClusterHost, GenericClusterHost
from illuminatio.util import rand_port, INVERTED_ATTRIBUTE_PREFIX
def _get_other_host_from(connection_targets, rule_namespace):
namespace_labels = "namespaceLabels"
pod_labels = "podLabels"
namespace = "namespace"
if namespace_labels in connection_targets and pod_labels in connection_targets:
return GenericClusterHost(connection_targets[namespace_labels], connection_targets[pod_labels])
if namespace in connection_targets and pod_labels in connection_targets:
return ClusterHost(connection_targets[namespace], connection_targets[pod_labels])
if namespace_labels in connection_targets: # and no podLabels included
return GenericClusterHost(connection_targets[namespace_labels], {})
if pod_labels in connection_targets:
return ClusterHost(rule_namespace, connection_targets[pod_labels])
if connection_targets == {}:
return GenericClusterHost({}, {})
raise ValueError("Unknown combination of field in connection %s" % connection_targets)
def get_namespace_label_strings(namespace_labels, namespaces):
"""
Returns a set of all stringified namespace labels
"""
# list of all namespace names with labels
return {labels_to_string(namespace_label): [namespace.metadata.name for namespace in namespaces
if namespace.metadata.labels is not None and
namespace_label.items() <= namespace.metadata.labels.items()]
for namespace_label in namespace_labels}
class NetworkTestCaseGenerator:
"""
Class for Generating Test cases out of a k8s NetworkPolicy and saving them to a specified format
"""
def __init__(self, log):
self.logger = log
def generate_test_cases(self,
network_policies: List[k8s.client.V1NetworkPolicy],
namespaces: List[k8s.client.V1Namespace]):
"""
Generates positive and negative test cases, also returns measured runtimes
"""
runtimes = {}
start_time = time.time()
isolated_hosts = []
other_hosts = []
outgoing_test_cases = []
incoming_test_cases = []
self.logger.debug("Generating test cases for %s", network_policies)
rules = [Rule.from_network_policy(netPol) for netPol in network_policies]
net_pol_parsing_time = time.time()
runtimes["parse"] = net_pol_parsing_time - start_time
self.logger.debug("Rule: %s", rules)
for rule in rules:
rule_host = ClusterHost(rule.concerns["namespace"], rule.concerns["podLabels"])
if rule_host not in isolated_hosts:
isolated_hosts.append(rule_host)
if rule.allowed: # means it is NOT default deny rule
for connection in rule.allowed:
for port in connection.ports:
on_port = port
other_host = _get_other_host_from(connection.targets, rule.concerns["namespace"])
other_hosts.append(other_host)
if connection.direction == "to":
case = NetworkTestCase(rule_host, other_host, on_port, True)
outgoing_test_cases.append(case)
elif connection.direction == "from":
case = NetworkTestCase(other_host, rule_host, on_port, True)
incoming_test_cases.append(case)
else:
raise ValueError("Direction '%s' unknown!" % connection.direction)
positive_test_time = time.time()
runtimes["positiveTestGen"] = positive_test_time - net_pol_parsing_time
negative_test_cases, negative_test_gen_runtimes = self.generate_negative_cases_for_incoming_cases(
isolated_hosts,
incoming_test_cases,
other_hosts, namespaces)
runtimes["negativeTestGen"] = negative_test_gen_runtimes
return outgoing_test_cases + negative_test_cases + incoming_test_cases, runtimes
# TODO: implement it also for outgoing test cases
# TODO: divide this into submethods
def generate_negative_cases_for_incoming_cases(self, isolated_hosts, incoming_test_cases, other_hosts, namespaces):
"""
Generates negative test cases based on desired positive test cases
"""
runtimes = {}
start_time = time.time()
# list of all namespace labels set on other hosts
namespace_labels = [h.namespace_labels for h in other_hosts if isinstance(h, GenericClusterHost)]
namespaces_per_label_strings = get_namespace_label_strings(namespace_labels, namespaces)
namespace_label_resolve_time = time.time()
runtimes["nsLabelResolve"] = namespace_label_resolve_time - start_time
labels_per_namespace = {n.metadata.name: n.metadata.labels for n in namespaces}
overlaps_per_host = {
host: self.get_overlapping_hosts(host, namespaces_per_label_strings, labels_per_namespace,
isolated_hosts + other_hosts)
for host in isolated_hosts}
overlap_calc_time = time.time()
runtimes["overlapCalc"] = overlap_calc_time - namespace_label_resolve_time
cases = []
for host in isolated_hosts:
host_string = str(host)
host_start_time = time.time()
runtimes[host_string] = {}
# Check for hosts that can target these to construct negative cases from
self.logger.debug(overlaps_per_host[host])
allowed_hosts_with_ports = [(test_case.from_host, test_case.port_string)
for test_case in incoming_test_cases if
test_case.to_host in overlaps_per_host[host]]
self.logger.debug("allowed_hosts_with_ports=%s", allowed_hosts_with_ports)
reaching_host_find_time = time.time()
runtimes[host_string]["findReachingHosts"] = reaching_host_find_time - host_start_time
if allowed_hosts_with_ports:
allowed_hosts, _ = zip(*allowed_hosts_with_ports)
ports_per_host = {host: [port for _host, port in allowed_hosts_with_ports if _host == host]
for host in allowed_hosts}
match_all_host = GenericClusterHost({}, {})
if match_all_host in allowed_hosts:
# All hosts are allowed to reach (on some ports or all) => results from ALLOW all
if "*" in ports_per_host[match_all_host]:
self.logger.info("Not generating negative tests for host %s"
"as all connections to it are allowed", host)
else:
cases.append(NetworkTestCase(match_all_host, host,
rand_port(ports_per_host[match_all_host]), False))
runtimes[host_string]["matchAllCase"] = time.time() - reaching_host_find_time
else:
inverted_hosts = set([h for l in [invert_host(host) for host in allowed_hosts] for h in l])
hosts_on_inverted = {h: originalHost for l, originalHost in
[(invert_host(host), host) for host in allowed_hosts] for h in l}
host_inversion_time = time.time()
runtimes[host_string]["hostInversion"] = host_inversion_time - reaching_host_find_time
overlaps_for_inverted_hosts = {
h: self.get_overlapping_hosts(h, namespaces_per_label_strings,
labels_per_namespace, allowed_hosts)
for h in inverted_hosts}
overlap_calc_time = time.time()
runtimes[host_string]["overlapCalc"] = overlap_calc_time - host_inversion_time
self.logger.debug("InvertedHosts: %s", inverted_hosts)
negative_test_targets = [h for h in inverted_hosts if len(overlaps_for_inverted_hosts[h]) <= 1]
self.logger.debug("NegativeTestTargets: %s", negative_test_targets)
# now remove the inverted hosts that are reachable
for target in negative_test_targets:
ports_for_inverted_hosts_original_host = ports_per_host[hosts_on_inverted[target]]
if ports_for_inverted_hosts_original_host:
cases.append(
NetworkTestCase(target, host, ports_for_inverted_hosts_original_host[0], False))
else:
cases.append(NetworkTestCase(target, host, "*", False))
runtimes[host_string]["casesGen"] = time.time() - overlap_calc_time
else:
# No hosts are allowed to reach host -> it should be totally isolated
# => results from default deny policy
cases.append(NetworkTestCase(host, host, "*", False))
runtimes["all"] = time.time() - start_time
return cases, runtimes
def get_overlapping_hosts(self, host, namespaces_per_label_strings, labels_per_namespace, other_hosts):
"""
Returns a list of hosts that might be selected by the same policies
"""
out = [host]
for other in other_hosts:
if host is not other:
namespace_overlap = self.namespaces_overlap(host, namespaces_per_label_strings,
labels_per_namespace, other)
pod_label_overlap = label_selector_overlap(other.pod_labels, host.pod_labels)
if namespace_overlap and pod_label_overlap:
out.append(other)
return out
def namespaces_overlap(self, host, namespaces_per_label_strings, labels_per_namespace, other_host):
"""
Checks whether two hosts have namespaces in common
"""
host_ns = self.resolve_namespaces(host, namespaces_per_label_strings)
other_ns = self.resolve_namespaces(other_host, namespaces_per_label_strings)
if host_ns and other_ns:
return any(ns in other_ns for ns in host_ns)
ns_labels = lookup_namespace_labels(host, labels_per_namespace)
other_ns_labels = lookup_namespace_labels(other_host, labels_per_namespace)
if ns_labels is not None and other_ns_labels is not None:
return label_selector_overlap(ns_labels, other_ns_labels)
return False
def resolve_namespaces(self, host, namespaces_per_label_strings):
"""
Returns the namespace of a given host
"""
self.logger.debug(host)
if isinstance(host, ClusterHost):
return [host.namespace]
labels = labels_to_string(host.namespace_labels)
return namespaces_per_label_strings[labels] if labels in namespaces_per_label_strings else []
def invert_host(host):
"""
Returns a list of either inverted GenericClusterHosts or inverted ClusterHosts
"""
if isinstance(host, GenericClusterHost):
return invert_generic_cluster_host(host)
if isinstance(host, ClusterHost):
return invert_cluster_host(host)
raise ValueError("Host %s is of unsupported type" % host)
def invert_cluster_host(host: ClusterHost):
"""
Returns a list of ClusterHosts with
once inverted pod label selectors,
once inverted namespace label selectors
and once both
"""
if host.pod_labels == {}:
return [ClusterHost("%s%s" % (INVERTED_ATTRIBUTE_PREFIX, host.namespace), {})]
inverted_hosts = [ClusterHost("%s%s" % (INVERTED_ATTRIBUTE_PREFIX, host.namespace), host.pod_labels),
ClusterHost("%s%s" % (INVERTED_ATTRIBUTE_PREFIX, host.namespace),
invert_label_selector(host.pod_labels)),
ClusterHost(host.namespace, invert_label_selector(host.pod_labels))]
return inverted_hosts
def invert_generic_cluster_host(host: GenericClusterHost):
"""
Returns a list of GenericClusterHosts with
once inverted pod label selectors,
once inverted namespace label selectors
and once both
"""
if host == GenericClusterHost({}, {}):
raise ValueError("Cannot invert GenericClusterHost matching all hosts in cluster")
if host.namespace_labels == {}:
return [GenericClusterHost({}, invert_label_selector(host.pod_labels))]
inverted_hosts = [GenericClusterHost(host.namespace_labels, invert_label_selector(host.pod_labels)),
GenericClusterHost(invert_label_selector(host.namespace_labels), host.pod_labels),
GenericClusterHost(invert_label_selector(host.namespace_labels),
invert_label_selector(host.pod_labels))]
return inverted_hosts
def invert_label_selector(labels):
"""
Inverts a label selector
"""
return {"%s%s" % (INVERTED_ATTRIBUTE_PREFIX, k): v for k, v in labels.items()}
def label_selector_overlap(label_selector_1, label_selector_2):
"""
Returns the intersection of two label selectors
"""
if label_selector_1 and label_selector_2:
return any(item in label_selector_2.items() for item in label_selector_1.items())
# if one of the label selector dicts is empty, they always overlap, as empty label selectors select all labels
return True
def lookup_namespace_labels(host, labels_per_namespace):
"""
Returns the namespace labels of a host
"""
if isinstance(host, GenericClusterHost):
return host.namespace_labels
if host.namespace in labels_per_namespace:
return labels_per_namespace[host.namespace]
return None
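# Quick illustration of the two selector helpers (label values are made up):
#
#   label_selector_overlap({"app": "web", "tier": "fe"}, {"app": "web"})  # -> True
#   label_selector_overlap({"app": "web"}, {"app": "db"})                 # -> False
#   label_selector_overlap({}, {"app": "db"})                             # -> True, empty selectors select everything
#
#   invert_label_selector({"app": "web"})
#   # -> {"<prefix>app": "web"}, where <prefix> is INVERTED_ATTRIBUTE_PREFIX
#   #    from illuminatio.util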
|
the-stack_0_13127 | import glob
import os
import argparse
from mega_core.config import cfg
from predictor import VIDDemo
parser = argparse.ArgumentParser(description="PyTorch Object Detection Visualization")
parser.add_argument(
"method",
choices=["base", "dff", "fgfa", "rdn", "mega"],
default="base",
type=str,
help="which method to use",
)
parser.add_argument(
"config",
default="configs/vid_R_101_C4_1x.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"checkpoint",
default="R_101.pth",
help="The path to the checkpoint for test.",
)
parser.add_argument(
"--visualize-path",
default="datasets/ILSVRC2015/Data/VID/val/ILSVRC2015_val_00003001",
# default="datasets/ILSVRC2015/Data/VID/snippets/val/ILSVRC2015_val_00003001.mp4",
help="the folder or a video to visualize.",
)
parser.add_argument(
"--suffix",
default=".JPEG",
help="the suffix of the images in the image folder.",
)
parser.add_argument(
"--output-folder",
default="demo/visualization/base",
help="where to store the visulization result.",
)
parser.add_argument(
"--video",
action="store_true",
help="if True, input a video for visualization.",
)
parser.add_argument(
"--output-video",
action="store_true",
help="if True, output a video.",
)
args = parser.parse_args()
cfg.merge_from_file("configs/BASE_RCNN_1gpu.yaml")
cfg.merge_from_file(args.config)
cfg.merge_from_list(["MODEL.WEIGHT", args.checkpoint])
vid_demo = VIDDemo(
cfg,
method=args.method,
confidence_threshold=0.1,
output_folder=args.output_folder
)
if not args.video:
visualization_results = vid_demo.run_on_image_folder(args.visualize_path, suffix=args.suffix)
else:
visualization_results = vid_demo.run_on_video(args.visualize_path)
if not args.output_video:
vid_demo.generate_images(visualization_results)
else:
vid_demo.generate_video(visualization_results) |
the-stack_0_13130 | """
This module is used to extract and preprocess the raw data.
N.B. some preprocessing is done manually.
"""
import os
import json
data_path = os.path.dirname(__file__) + '/../src/bangla/data/'
def get_word_list():
with open(data_path + 'words.txt', 'r', encoding='utf-8') as file:
words_list = file.read().split('\n')
return [i for i in words_list if i != '']
def get_letters():
with open(data_path + 'bangla_letters.json', mode='r', encoding='utf-8') as file:
letters = json.loads(file.read())
return letters["letters"]
def get_numbers():
with open(data_path + 'bangla_letters.json', mode='r', encoding='utf-8') as file:
letters = json.loads(file.read())
return letters["numbers"]
def descriminate(letter, word_list = get_word_list()):
return list(set([i for i in word_list if i[0] == letter]))
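# Illustration (invented word list): with word_list = ['কলম', 'আম', 'কাজ'],
# descriminate('ক', word_list) keeps only the unique words starting with 'ক',
# e.g. ['কলম', 'কাজ'] (order is not guaranteed because of the intermediate set).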
"""
print([i for i in get_word_list() if i[0] == '-'])
temp_word_list = [i for i in get_word_list() if ' ' in i]
temp_word_dict = {}
for i in get_letters():
temp = [i for i in descriminate(i, temp_word_list) if len(i.split()) > 1]
temp_word_dict.update({i: temp})
json.dump(temp_word_dict, open(data_path + 'temp_bangla.json', mode='w', encoding='utf-8'), ensure_ascii = False)
"""
word_dict = {}
for i in get_letters():
descriminate_val = descriminate(i)
if len(descriminate_val) > 0: word_dict.update({i: descriminate_val})
else: word_dict.update({i: i})
for i in get_numbers():
word_dict.update({i: i})
json.dump(word_dict, open(data_path + 'bangla.json', mode='w', encoding='utf-8'), ensure_ascii = False) |
the-stack_0_13131 | import math
import pyaudio
import itertools
import numpy as np
from pygame import midi
BUFFER_SIZE = 256
SAMPLE_RATE = 44100
NOTE_AMP = 0.1
# -- HELPER FUNCTIONS --
def get_sin_oscillator(freq=55, amp=1, sample_rate=SAMPLE_RATE):
increment = (2 * math.pi * freq)/ sample_rate
return (math.sin(v) * amp * NOTE_AMP \
for v in itertools.count(start=0, step=increment))
def get_samples(notes_dict, num_samples=BUFFER_SIZE):
return [sum([int(next(osc) * 32767) \
for _, osc in notes_dict.items()]) \
for _ in range(num_samples)]
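# Offline illustration (no audio device needed; the MIDI note numbers and
# frequencies below are arbitrary examples): mix two oscillators into one
# buffer of summed samples.
#
#   notes = {57: get_sin_oscillator(freq=220, amp=0.5),
#            69: get_sin_oscillator(freq=440, amp=1.0)}
#   buffer = get_samples(notes)  # list of BUFFER_SIZE summed integer samples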
# -- INITIALIZATION --
midi.init()
default_id = midi.get_default_input_id()
midi_input = midi.Input(device_id=default_id)
stream = pyaudio.PyAudio().open(
rate=SAMPLE_RATE,
channels=1,
format=pyaudio.paInt16,
output=True,
frames_per_buffer=BUFFER_SIZE
)
# -- RUN THE SYNTH --
try:
print("Starting...")
notes_dict = {}
while True:
if notes_dict:
# Play the notes
samples = get_samples(notes_dict)
samples = np.int16(samples).tobytes()
stream.write(samples)
if midi_input.poll():
# Add or remove notes from notes_dict
for event in midi_input.read(num_events=16):
(status, note, vel, _), _ = event
if status == 0x80 and note in notes_dict:
del notes_dict[note]
elif status == 0x90 and note not in notes_dict:
freq = midi.midi_to_frequency(note)
notes_dict[note] = get_sin_oscillator(freq=freq, amp=vel/127)
except KeyboardInterrupt as err:
midi_input.close()
stream.close()
print("Stopping...")
|
the-stack_0_13133 | # -*- coding: utf-8 -*-
"""Cisco DNA Center GetAuditlogParentRecords data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorF8E3A0674C15Fd58Cd78F42Dca37C7C(object):
"""GetAuditlogParentRecords request schema definition."""
def __init__(self):
super(JSONSchemaValidatorF8E3A0674C15Fd58Cd78F42Dca37C7C, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"items": {
"properties": {
"additionalDetails": {
"type": "object"
},
"category": {
"type": "string"
},
"childCount": {
"type": "number"
},
"ciscoDnaEventLink": {
"type": "string"
},
"context": {
"type": "string"
},
"description":
{
"type": "string"
},
"details": {
"type": "object"
},
"domain": {
"type": "string"
},
"eventHierarchy": {
"type": "string"
},
"eventId": {
"type": "string"
},
"i18n": {
"type": "string"
},
"instanceId": {
"type": "string"
},
"message": {
"type": "string"
},
"messageParams": {
"type": "string"
},
"name": {
"type": "string"
},
"namespace": {
"type": "string"
},
"network": {
"type": "string"
},
"note": {
"type": "string"
},
"parentInstanceId": {
"type": "string"
},
"severity": {
"type": "integer"
},
"source": {
"type": "string"
},
"subDomain": {
"type": "string"
},
"tags": {
"type": "array"
},
"tenantId": {
"type": "string"
},
"timestamp": {
"type": "integer"
},
"tntId": {
"type": "string"
},
"type": {
"type": "string"
},
"userId": {
"type": "string"
},
"version": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
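# Usage sketch (illustrative only; the field values are made up, and since the
# schema declares no required properties an empty object would also validate):
#
#   validator = JSONSchemaValidatorF8E3A0674C15Fd58Cd78F42Dca37C7C()
#   validator.validate([{"category": "AUDIT_LOG", "severity": 1,
#                        "timestamp": 1629800000000}])
#   # raises MalformedRequest if the payload does not match the schema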
|
the-stack_0_13134 | import os
from os.path import exists
import pytest
from pip._internal.cli.status_codes import PREVIOUS_BUILD_DIR_ERROR
from pip._internal.utils.marker_files import write_delete_marker_file
from tests.lib import need_mercurial
from tests.lib.local_repos import local_checkout
def test_cleanup_after_install(script, data):
"""
Test clean up after installing a package.
"""
script.pip(
'install', '--no-index',
'--find-links={}'.format(data.find_links),
'simple'
)
build = script.venv_path / "build"
src = script.venv_path / "src"
assert not exists(build), "build/ dir still exists: {}".format(build)
assert not exists(src), "unexpected src/ dir exists: {}" .format(src)
script.assert_no_temp()
@pytest.mark.network
def test_no_clean_option_blocks_cleaning_after_install(script, data):
"""
Test --no-clean option blocks cleaning after install
"""
build = script.base_path / 'pip-build'
script.pip(
'install', '--no-clean', '--no-index', '--build', build,
'--find-links={}'.format(data.find_links), 'simple', expect_temp=True,
)
assert exists(build)
@pytest.mark.network
@need_mercurial
def test_cleanup_after_install_editable_from_hg(script, tmpdir):
"""
Test clean up after cloning from Mercurial.
"""
requirement = '{}#egg=ScriptTest'.format(
local_checkout('hg+https://bitbucket.org/ianb/scripttest', tmpdir)
)
script.pip('install', '-e', requirement)
build = script.venv_path / 'build'
src = script.venv_path / 'src'
assert not exists(build), "build/ dir still exists: {}".format(build)
assert exists(src), "expected src/ dir doesn't exist: {}".format(src)
script.assert_no_temp()
def test_cleanup_after_install_from_local_directory(script, data):
"""
Test clean up after installing from a local directory.
"""
to_install = data.packages.joinpath("FSPkg")
script.pip('install', to_install)
build = script.venv_path / 'build'
src = script.venv_path / 'src'
assert not exists(build), "unexpected build/ dir exists: {}".format(build)
assert not exists(src), "unexpected src/ dir exist: {}".format(src)
script.assert_no_temp()
def test_cleanup_req_satisfied_no_name(script, data):
"""
Test cleanup when req is already satisfied, and req has no 'name'
"""
# this test confirms Issue #420 is fixed
# reqs with no 'name' that were already satisfied were leaving behind tmp
# build dirs
# 2 examples of reqs that would do this
# 1) https://bitbucket.org/ianb/initools/get/tip.zip
# 2) parent-0.1.tar.gz
dist = data.packages.joinpath("parent-0.1.tar.gz")
script.pip('install', dist)
script.pip('install', dist)
build = script.venv_path / 'build'
assert not exists(build), "unexpected build/ dir exists: %s" % build
script.assert_no_temp()
def test_cleanup_after_install_exception(script, data):
"""
Test clean up after a 'setup.py install' exception.
"""
# broken==0.2broken fails during install; see packages readme file
result = script.pip(
'install', '-f', data.find_links, '--no-index', 'broken==0.2broken',
expect_error=True,
)
build = script.venv_path / 'build'
assert not exists(build), "build/ dir still exists: %s" % result.stdout
script.assert_no_temp()
def test_cleanup_after_egg_info_exception(script, data):
"""
Test clean up after a 'setup.py egg_info' exception.
"""
# brokenegginfo fails during egg_info; see packages readme file
result = script.pip(
'install', '-f', data.find_links, '--no-index', 'brokenegginfo==0.1',
expect_error=True,
)
build = script.venv_path / 'build'
assert not exists(build), "build/ dir still exists: %s" % result.stdout
script.assert_no_temp()
@pytest.mark.network
def test_cleanup_prevented_upon_build_dir_exception(script, data):
"""
Test no cleanup occurs after a PreviousBuildDirError
"""
build = script.venv_path / 'build'
build_simple = build / 'simple'
os.makedirs(build_simple)
write_delete_marker_file(build_simple)
build_simple.joinpath("setup.py").write_text("#")
result = script.pip(
'install', '-f', data.find_links, '--no-index', 'simple',
'--build', build,
expect_error=True, expect_temp=True,
)
assert result.returncode == PREVIOUS_BUILD_DIR_ERROR, str(result)
assert "pip can't proceed" in result.stderr, str(result)
assert exists(build_simple), str(result)
@pytest.mark.network
def test_pep517_no_legacy_cleanup(script, data, with_wheel):
"""Test a PEP 517 failed build does not attempt a legacy cleanup"""
to_install = data.packages.joinpath('pep517_wrapper_buildsys')
script.environ["PIP_TEST_FAIL_BUILD_WHEEL"] = "1"
res = script.pip(
'install', '-f', data.find_links, to_install,
expect_error=True
)
# Must not have built the package
expected = "Failed building wheel for pep517-wrapper-buildsys"
assert expected in str(res)
# Must not have attempted legacy cleanup
assert "setup.py clean" not in str(res)
|
the-stack_0_13135 | import numpy as np
import matplotlib.pyplot as plt
from audio import spec2wav, wav2spec, read_wav, write_wav
if __name__ == '__main__':
sr = 22050
n_fft = 512
win_length = 400
hop_length = 80
duration = 2 # sec
wav = read_wav( "H:\\cs230\\wav_x\\1_1.wav", sr, duration )
spec, _ = wav2spec(wav, n_fft, win_length, hop_length, False)
converted_wav = spec2wav(spec, n_fft, win_length, hop_length, 600)
write_wav(converted_wav, sr, 'a.wav')
plt.pcolormesh(spec)
plt.ylabel('Frequency')
plt.xlabel('Time')
plt.savefig("a.png")
|
the-stack_0_13137 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .rigolDS1000Z import *
class rigolDS1074Z(rigolDS1000Z):
"Rigol DS1074Z IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'DS1074Z')
super(rigolDS1074Z, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 70e6
self._init_channels()
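# Usage sketch (assumes the surrounding python-ivi package layout and its
# generic oscilloscope API; the VISA resource string is a placeholder):
#
#   import ivi
#   scope = ivi.rigol.rigolDS1074Z("TCPIP0::192.168.1.100::INSTR")
#   scope.channels[0].enabled = True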
|
the-stack_0_13138 | import contextlib
import ctypes
import json
import os
import shutil
import struct
import subprocess
import sys
import tempfile
import time
from datetime import datetime, timedelta, timezone
from enum import Enum, auto
from hashlib import pbkdf2_hmac
from .aes import (
aes_cbc_decrypt_bytes,
aes_gcm_decrypt_and_verify_bytes,
unpad_pkcs7,
)
from .compat import compat_b64decode, compat_cookiejar_Cookie
from .dependencies import (
_SECRETSTORAGE_UNAVAILABLE_REASON,
secretstorage,
sqlite3,
)
from .minicurses import MultilinePrinter, QuietMultilinePrinter
from .utils import Popen, YoutubeDLCookieJar, error_to_str, expand_path
CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi'}
SUPPORTED_BROWSERS = CHROMIUM_BASED_BROWSERS | {'firefox', 'safari'}
class YDLLogger:
def __init__(self, ydl=None):
self._ydl = ydl
def debug(self, message):
if self._ydl:
self._ydl.write_debug(message)
def info(self, message):
if self._ydl:
self._ydl.to_screen(f'[Cookies] {message}')
def warning(self, message, only_once=False):
if self._ydl:
self._ydl.report_warning(message, only_once)
def error(self, message):
if self._ydl:
self._ydl.report_error(message)
class ProgressBar(MultilinePrinter):
_DELAY, _timer = 0.1, 0
def print(self, message):
if time.time() - self._timer > self._DELAY:
self.print_at_line(f'[Cookies] {message}', 0)
self._timer = time.time()
def progress_bar(self):
"""Return a context manager with a print method. (Optional)"""
# Do not print to files/pipes, loggers, or when --no-progress is used
if not self._ydl or self._ydl.params.get('noprogress') or self._ydl.params.get('logger'):
return
file = self._ydl._out_files['error']
try:
if not file.isatty():
return
except BaseException:
return
return self.ProgressBar(file, preserve_output=False)
def _create_progress_bar(logger):
if hasattr(logger, 'progress_bar'):
printer = logger.progress_bar()
if printer:
return printer
printer = QuietMultilinePrinter()
printer.print = lambda _: None
return printer
def load_cookies(cookie_file, browser_specification, ydl):
cookie_jars = []
if browser_specification is not None:
browser_name, profile, keyring = _parse_browser_specification(*browser_specification)
cookie_jars.append(extract_cookies_from_browser(browser_name, profile, YDLLogger(ydl), keyring=keyring))
if cookie_file is not None:
is_filename = YoutubeDLCookieJar.is_path(cookie_file)
if is_filename:
cookie_file = expand_path(cookie_file)
jar = YoutubeDLCookieJar(cookie_file)
if not is_filename or os.access(cookie_file, os.R_OK):
jar.load(ignore_discard=True, ignore_expires=True)
cookie_jars.append(jar)
return _merge_cookie_jars(cookie_jars)
def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(), *, keyring=None):
if browser_name == 'firefox':
return _extract_firefox_cookies(profile, logger)
elif browser_name == 'safari':
return _extract_safari_cookies(profile, logger)
elif browser_name in CHROMIUM_BASED_BROWSERS:
return _extract_chrome_cookies(browser_name, profile, keyring, logger)
else:
raise ValueError(f'unknown browser: {browser_name}')
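# Illustrative calls (the Chromium profile name is a placeholder):
#
#   jar = extract_cookies_from_browser('firefox', logger=YDLLogger())
#   jar = extract_cookies_from_browser('chrome', profile='Default', logger=YDLLogger())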
def _extract_firefox_cookies(profile, logger):
logger.info('Extracting cookies from firefox')
if not sqlite3:
logger.warning('Cannot extract cookies from firefox without sqlite3 support. '
'Please use a python interpreter compiled with sqlite3 support')
return YoutubeDLCookieJar()
if profile is None:
search_root = _firefox_browser_dir()
elif _is_path(profile):
search_root = profile
else:
search_root = os.path.join(_firefox_browser_dir(), profile)
cookie_database_path = _find_most_recently_used_file(search_root, 'cookies.sqlite', logger)
if cookie_database_path is None:
raise FileNotFoundError(f'could not find firefox cookies database in {search_root}')
logger.debug(f'Extracting cookies from: "{cookie_database_path}"')
with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
cursor = None
try:
cursor = _open_database_copy(cookie_database_path, tmpdir)
cursor.execute('SELECT host, name, value, path, expiry, isSecure FROM moz_cookies')
jar = YoutubeDLCookieJar()
with _create_progress_bar(logger) as progress_bar:
table = cursor.fetchall()
total_cookie_count = len(table)
for i, (host, name, value, path, expiry, is_secure) in enumerate(table):
progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
cookie = compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expiry, discard=False,
comment=None, comment_url=None, rest={})
jar.set_cookie(cookie)
logger.info(f'Extracted {len(jar)} cookies from firefox')
return jar
finally:
if cursor is not None:
cursor.connection.close()
def _firefox_browser_dir():
if sys.platform in ('linux', 'linux2'):
return os.path.expanduser('~/.mozilla/firefox')
elif sys.platform == 'win32':
return os.path.expandvars(R'%APPDATA%\Mozilla\Firefox\Profiles')
elif sys.platform == 'darwin':
return os.path.expanduser('~/Library/Application Support/Firefox')
else:
raise ValueError(f'unsupported platform: {sys.platform}')
def _get_chromium_based_browser_settings(browser_name):
# https://chromium.googlesource.com/chromium/src/+/HEAD/docs/user_data_dir.md
if sys.platform in ('linux', 'linux2'):
config = _config_home()
browser_dir = {
'brave': os.path.join(config, 'BraveSoftware/Brave-Browser'),
'chrome': os.path.join(config, 'google-chrome'),
'chromium': os.path.join(config, 'chromium'),
'edge': os.path.join(config, 'microsoft-edge'),
'opera': os.path.join(config, 'opera'),
'vivaldi': os.path.join(config, 'vivaldi'),
}[browser_name]
elif sys.platform == 'win32':
appdata_local = os.path.expandvars('%LOCALAPPDATA%')
appdata_roaming = os.path.expandvars('%APPDATA%')
browser_dir = {
'brave': os.path.join(appdata_local, R'BraveSoftware\Brave-Browser\User Data'),
'chrome': os.path.join(appdata_local, R'Google\Chrome\User Data'),
'chromium': os.path.join(appdata_local, R'Chromium\User Data'),
'edge': os.path.join(appdata_local, R'Microsoft\Edge\User Data'),
'opera': os.path.join(appdata_roaming, R'Opera Software\Opera Stable'),
'vivaldi': os.path.join(appdata_local, R'Vivaldi\User Data'),
}[browser_name]
elif sys.platform == 'darwin':
appdata = os.path.expanduser('~/Library/Application Support')
browser_dir = {
'brave': os.path.join(appdata, 'BraveSoftware/Brave-Browser'),
'chrome': os.path.join(appdata, 'Google/Chrome'),
'chromium': os.path.join(appdata, 'Chromium'),
'edge': os.path.join(appdata, 'Microsoft Edge'),
'opera': os.path.join(appdata, 'com.operasoftware.Opera'),
'vivaldi': os.path.join(appdata, 'Vivaldi'),
}[browser_name]
else:
raise ValueError(f'unsupported platform: {sys.platform}')
# Linux keyring names can be determined by snooping on dbus while opening the browser in KDE:
# dbus-monitor "interface='org.kde.KWallet'" "type=method_return"
keyring_name = {
'brave': 'Brave',
'chrome': 'Chrome',
'chromium': 'Chromium',
'edge': 'Microsoft Edge' if sys.platform == 'darwin' else 'Chromium',
'opera': 'Opera' if sys.platform == 'darwin' else 'Chromium',
'vivaldi': 'Vivaldi' if sys.platform == 'darwin' else 'Chrome',
}[browser_name]
browsers_without_profiles = {'opera'}
return {
'browser_dir': browser_dir,
'keyring_name': keyring_name,
'supports_profiles': browser_name not in browsers_without_profiles
}
def _extract_chrome_cookies(browser_name, profile, keyring, logger):
logger.info(f'Extracting cookies from {browser_name}')
if not sqlite3:
logger.warning(f'Cannot extract cookies from {browser_name} without sqlite3 support. '
'Please use a python interpreter compiled with sqlite3 support')
return YoutubeDLCookieJar()
config = _get_chromium_based_browser_settings(browser_name)
if profile is None:
search_root = config['browser_dir']
elif _is_path(profile):
search_root = profile
config['browser_dir'] = os.path.dirname(profile) if config['supports_profiles'] else profile
else:
if config['supports_profiles']:
search_root = os.path.join(config['browser_dir'], profile)
else:
logger.error(f'{browser_name} does not support profiles')
search_root = config['browser_dir']
cookie_database_path = _find_most_recently_used_file(search_root, 'Cookies', logger)
if cookie_database_path is None:
raise FileNotFoundError(f'could not find {browser_name} cookies database in "{search_root}"')
logger.debug(f'Extracting cookies from: "{cookie_database_path}"')
decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger, keyring=keyring)
with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
cursor = None
try:
cursor = _open_database_copy(cookie_database_path, tmpdir)
cursor.connection.text_factory = bytes
column_names = _get_column_names(cursor, 'cookies')
secure_column = 'is_secure' if 'is_secure' in column_names else 'secure'
cursor.execute(f'SELECT host_key, name, value, encrypted_value, path, expires_utc, {secure_column} FROM cookies')
jar = YoutubeDLCookieJar()
failed_cookies = 0
unencrypted_cookies = 0
with _create_progress_bar(logger) as progress_bar:
table = cursor.fetchall()
total_cookie_count = len(table)
for i, line in enumerate(table):
progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
is_encrypted, cookie = _process_chrome_cookie(decryptor, *line)
if not cookie:
failed_cookies += 1
continue
elif not is_encrypted:
unencrypted_cookies += 1
jar.set_cookie(cookie)
if failed_cookies > 0:
failed_message = f' ({failed_cookies} could not be decrypted)'
else:
failed_message = ''
logger.info(f'Extracted {len(jar)} cookies from {browser_name}{failed_message}')
counts = decryptor.cookie_counts.copy()
counts['unencrypted'] = unencrypted_cookies
logger.debug(f'cookie version breakdown: {counts}')
return jar
finally:
if cursor is not None:
cursor.connection.close()
def _process_chrome_cookie(decryptor, host_key, name, value, encrypted_value, path, expires_utc, is_secure):
host_key = host_key.decode()
name = name.decode()
value = value.decode()
path = path.decode()
is_encrypted = not value and encrypted_value
if is_encrypted:
value = decryptor.decrypt(encrypted_value)
if value is None:
return is_encrypted, None
return is_encrypted, compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expires_utc, discard=False,
comment=None, comment_url=None, rest={})
class ChromeCookieDecryptor:
"""
Overview:
Linux:
- cookies are either v10 or v11
- v10: AES-CBC encrypted with a fixed key
- v11: AES-CBC encrypted with an OS protected key (keyring)
- v11 keys can be stored in various places depending on the activate desktop environment [2]
Mac:
- cookies are either v10 or not v10
- v10: AES-CBC encrypted with an OS protected key (keyring) and more key derivation iterations than linux
- not v10: 'old data' stored as plaintext
Windows:
- cookies are either v10 or not v10
- v10: AES-GCM encrypted with a key which is encrypted with DPAPI
- not v10: encrypted with DPAPI
Sources:
- [1] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/
- [2] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_linux.cc
- KeyStorageLinux::CreateService
"""
def decrypt(self, encrypted_value):
raise NotImplementedError('Must be implemented by sub classes')
@property
def cookie_counts(self):
raise NotImplementedError('Must be implemented by sub classes')
def get_cookie_decryptor(browser_root, browser_keyring_name, logger, *, keyring=None):
if sys.platform in ('linux', 'linux2'):
return LinuxChromeCookieDecryptor(browser_keyring_name, logger, keyring=keyring)
elif sys.platform == 'darwin':
return MacChromeCookieDecryptor(browser_keyring_name, logger)
elif sys.platform == 'win32':
return WindowsChromeCookieDecryptor(browser_root, logger)
else:
raise NotImplementedError(f'Chrome cookie decryption is not supported on this platform: {sys.platform}')
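# Flow sketch (mirrors how _extract_chrome_cookies drives these classes; the
# arguments are placeholders):
#
#   decryptor = get_cookie_decryptor(browser_root, keyring_name, logger)
#   value = decryptor.decrypt(encrypted_value)  # str, or None if decryption fails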
class LinuxChromeCookieDecryptor(ChromeCookieDecryptor):
def __init__(self, browser_keyring_name, logger, *, keyring=None):
self._logger = logger
self._v10_key = self.derive_key(b'peanuts')
password = _get_linux_keyring_password(browser_keyring_name, keyring, logger)
self._v11_key = None if password is None else self.derive_key(password)
self._cookie_counts = {'v10': 0, 'v11': 0, 'other': 0}
@staticmethod
def derive_key(password):
# values from
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_linux.cc
return pbkdf2_sha1(password, salt=b'saltysalt', iterations=1, key_length=16)
@property
def cookie_counts(self):
return self._cookie_counts
def decrypt(self, encrypted_value):
version = encrypted_value[:3]
ciphertext = encrypted_value[3:]
if version == b'v10':
self._cookie_counts['v10'] += 1
return _decrypt_aes_cbc(ciphertext, self._v10_key, self._logger)
elif version == b'v11':
self._cookie_counts['v11'] += 1
if self._v11_key is None:
self._logger.warning('cannot decrypt v11 cookies: no key found', only_once=True)
return None
return _decrypt_aes_cbc(ciphertext, self._v11_key, self._logger)
else:
self._cookie_counts['other'] += 1
return None
class MacChromeCookieDecryptor(ChromeCookieDecryptor):
def __init__(self, browser_keyring_name, logger):
self._logger = logger
password = _get_mac_keyring_password(browser_keyring_name, logger)
self._v10_key = None if password is None else self.derive_key(password)
self._cookie_counts = {'v10': 0, 'other': 0}
@staticmethod
def derive_key(password):
# values from
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm
return pbkdf2_sha1(password, salt=b'saltysalt', iterations=1003, key_length=16)
@property
def cookie_counts(self):
return self._cookie_counts
def decrypt(self, encrypted_value):
version = encrypted_value[:3]
ciphertext = encrypted_value[3:]
if version == b'v10':
self._cookie_counts['v10'] += 1
if self._v10_key is None:
self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
return None
return _decrypt_aes_cbc(ciphertext, self._v10_key, self._logger)
else:
self._cookie_counts['other'] += 1
# other prefixes are considered 'old data' which were stored as plaintext
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm
return encrypted_value
class WindowsChromeCookieDecryptor(ChromeCookieDecryptor):
def __init__(self, browser_root, logger):
self._logger = logger
self._v10_key = _get_windows_v10_key(browser_root, logger)
self._cookie_counts = {'v10': 0, 'other': 0}
@property
def cookie_counts(self):
return self._cookie_counts
def decrypt(self, encrypted_value):
version = encrypted_value[:3]
ciphertext = encrypted_value[3:]
if version == b'v10':
self._cookie_counts['v10'] += 1
if self._v10_key is None:
self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
return None
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc
# kNonceLength
nonce_length = 96 // 8
# boringssl
# EVP_AEAD_AES_GCM_TAG_LEN
authentication_tag_length = 16
raw_ciphertext = ciphertext
nonce = raw_ciphertext[:nonce_length]
ciphertext = raw_ciphertext[nonce_length:-authentication_tag_length]
authentication_tag = raw_ciphertext[-authentication_tag_length:]
return _decrypt_aes_gcm(ciphertext, self._v10_key, nonce, authentication_tag, self._logger)
else:
self._cookie_counts['other'] += 1
# any other prefix means the data is DPAPI encrypted
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc
return _decrypt_windows_dpapi(encrypted_value, self._logger).decode()
def _extract_safari_cookies(profile, logger):
if profile is not None:
logger.error('safari does not support profiles')
if sys.platform != 'darwin':
raise ValueError(f'unsupported platform: {sys.platform}')
cookies_path = os.path.expanduser('~/Library/Cookies/Cookies.binarycookies')
if not os.path.isfile(cookies_path):
logger.debug('Trying secondary cookie location')
cookies_path = os.path.expanduser('~/Library/Containers/com.apple.Safari/Data/Library/Cookies/Cookies.binarycookies')
if not os.path.isfile(cookies_path):
raise FileNotFoundError('could not find safari cookies database')
with open(cookies_path, 'rb') as f:
cookies_data = f.read()
jar = parse_safari_cookies(cookies_data, logger=logger)
logger.info(f'Extracted {len(jar)} cookies from safari')
return jar
class ParserError(Exception):
pass
class DataParser:
def __init__(self, data, logger):
self._data = data
self.cursor = 0
self._logger = logger
def read_bytes(self, num_bytes):
if num_bytes < 0:
raise ParserError(f'invalid read of {num_bytes} bytes')
end = self.cursor + num_bytes
if end > len(self._data):
raise ParserError('reached end of input')
data = self._data[self.cursor:end]
self.cursor = end
return data
def expect_bytes(self, expected_value, message):
value = self.read_bytes(len(expected_value))
if value != expected_value:
raise ParserError(f'unexpected value: {value} != {expected_value} ({message})')
def read_uint(self, big_endian=False):
data_format = '>I' if big_endian else '<I'
return struct.unpack(data_format, self.read_bytes(4))[0]
def read_double(self, big_endian=False):
data_format = '>d' if big_endian else '<d'
return struct.unpack(data_format, self.read_bytes(8))[0]
def read_cstring(self):
buffer = []
while True:
c = self.read_bytes(1)
if c == b'\x00':
return b''.join(buffer).decode()
else:
buffer.append(c)
def skip(self, num_bytes, description='unknown'):
if num_bytes > 0:
self._logger.debug(f'skipping {num_bytes} bytes ({description}): {self.read_bytes(num_bytes)!r}')
elif num_bytes < 0:
raise ParserError(f'invalid skip of {num_bytes} bytes')
def skip_to(self, offset, description='unknown'):
self.skip(offset - self.cursor, description)
def skip_to_end(self, description='unknown'):
self.skip_to(len(self._data), description)
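# A tiny, self-contained illustration of how DataParser walks a byte buffer. The
# payload below is synthetic (real callers feed it Safari's binarycookies data), and
# the helper name is illustrative only.
def _example_data_parser():
    import struct
    payload = struct.pack('<I', 7) + b'hello\x00'
    p = DataParser(payload, logger=YDLLogger())
    assert p.read_uint() == 7            # little-endian uint32
    assert p.read_cstring() == 'hello'   # NUL-terminated string
    return p.cursor                      # == len(payload)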
def _mac_absolute_time_to_posix(timestamp):
return int((datetime(2001, 1, 1, 0, 0, tzinfo=timezone.utc) + timedelta(seconds=timestamp)).timestamp())
def _parse_safari_cookies_header(data, logger):
p = DataParser(data, logger)
p.expect_bytes(b'cook', 'database signature')
number_of_pages = p.read_uint(big_endian=True)
page_sizes = [p.read_uint(big_endian=True) for _ in range(number_of_pages)]
return page_sizes, p.cursor
def _parse_safari_cookies_page(data, jar, logger):
p = DataParser(data, logger)
p.expect_bytes(b'\x00\x00\x01\x00', 'page signature')
number_of_cookies = p.read_uint()
record_offsets = [p.read_uint() for _ in range(number_of_cookies)]
if number_of_cookies == 0:
logger.debug(f'a cookies page of size {len(data)} has no cookies')
return
p.skip_to(record_offsets[0], 'unknown page header field')
with _create_progress_bar(logger) as progress_bar:
for i, record_offset in enumerate(record_offsets):
progress_bar.print(f'Loading cookie {i: 6d}/{number_of_cookies: 6d}')
p.skip_to(record_offset, 'space between records')
record_length = _parse_safari_cookies_record(data[record_offset:], jar, logger)
p.read_bytes(record_length)
p.skip_to_end('space in between pages')
def _parse_safari_cookies_record(data, jar, logger):
p = DataParser(data, logger)
record_size = p.read_uint()
p.skip(4, 'unknown record field 1')
flags = p.read_uint()
is_secure = bool(flags & 0x0001)
p.skip(4, 'unknown record field 2')
domain_offset = p.read_uint()
name_offset = p.read_uint()
path_offset = p.read_uint()
value_offset = p.read_uint()
p.skip(8, 'unknown record field 3')
expiration_date = _mac_absolute_time_to_posix(p.read_double())
_creation_date = _mac_absolute_time_to_posix(p.read_double()) # noqa: F841
try:
p.skip_to(domain_offset)
domain = p.read_cstring()
p.skip_to(name_offset)
name = p.read_cstring()
p.skip_to(path_offset)
path = p.read_cstring()
p.skip_to(value_offset)
value = p.read_cstring()
except UnicodeDecodeError:
logger.warning('failed to parse Safari cookie because UTF-8 decoding failed', only_once=True)
return record_size
p.skip_to(record_size, 'space at the end of the record')
cookie = compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=domain, domain_specified=bool(domain), domain_initial_dot=domain.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expiration_date, discard=False,
comment=None, comment_url=None, rest={})
jar.set_cookie(cookie)
return record_size
def parse_safari_cookies(data, jar=None, logger=YDLLogger()):
"""
References:
- https://github.com/libyal/dtformats/blob/main/documentation/Safari%20Cookies.asciidoc
    - this data appears to be out of date, but the important parts of the database structure are the same
- there are a few bytes here and there which are skipped during parsing
"""
if jar is None:
jar = YoutubeDLCookieJar()
page_sizes, body_start = _parse_safari_cookies_header(data, logger)
p = DataParser(data[body_start:], logger)
for page_size in page_sizes:
_parse_safari_cookies_page(p.read_bytes(page_size), jar, logger)
p.skip_to_end('footer')
return jar
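# Degenerate example (illustrative helper, not part of the module's API): a blob that
# declares zero pages parses to an empty jar. This is only meant to make the header
# layout ('cook' signature + big-endian page count + page sizes) concrete.
def _example_parse_empty_safari_database():
    data = b'cook' + (0).to_bytes(4, 'big')
    return len(parse_safari_cookies(data))  # 0 cookies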
class _LinuxDesktopEnvironment(Enum):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.h
DesktopEnvironment
"""
OTHER = auto()
CINNAMON = auto()
GNOME = auto()
KDE = auto()
PANTHEON = auto()
UNITY = auto()
XFCE = auto()
class _LinuxKeyring(Enum):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_util_linux.h
SelectedLinuxBackend
"""
KWALLET = auto()
GNOMEKEYRING = auto()
BASICTEXT = auto()
SUPPORTED_KEYRINGS = _LinuxKeyring.__members__.keys()
def _get_linux_desktop_environment(env):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.cc
GetDesktopEnvironment
"""
xdg_current_desktop = env.get('XDG_CURRENT_DESKTOP', None)
desktop_session = env.get('DESKTOP_SESSION', None)
if xdg_current_desktop is not None:
xdg_current_desktop = xdg_current_desktop.split(':')[0].strip()
if xdg_current_desktop == 'Unity':
if desktop_session is not None and 'gnome-fallback' in desktop_session:
return _LinuxDesktopEnvironment.GNOME
else:
return _LinuxDesktopEnvironment.UNITY
elif xdg_current_desktop == 'GNOME':
return _LinuxDesktopEnvironment.GNOME
elif xdg_current_desktop == 'X-Cinnamon':
return _LinuxDesktopEnvironment.CINNAMON
elif xdg_current_desktop == 'KDE':
return _LinuxDesktopEnvironment.KDE
elif xdg_current_desktop == 'Pantheon':
return _LinuxDesktopEnvironment.PANTHEON
elif xdg_current_desktop == 'XFCE':
return _LinuxDesktopEnvironment.XFCE
elif desktop_session is not None:
if desktop_session in ('mate', 'gnome'):
return _LinuxDesktopEnvironment.GNOME
elif 'kde' in desktop_session:
return _LinuxDesktopEnvironment.KDE
elif 'xfce' in desktop_session:
return _LinuxDesktopEnvironment.XFCE
else:
if 'GNOME_DESKTOP_SESSION_ID' in env:
return _LinuxDesktopEnvironment.GNOME
elif 'KDE_FULL_SESSION' in env:
return _LinuxDesktopEnvironment.KDE
return _LinuxDesktopEnvironment.OTHER
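# _get_linux_desktop_environment is a pure lookup over the mapping it is given, so its
# behaviour can be illustrated without touching os.environ (the environments below are
# synthetic and the helper name is illustrative only):
def _example_desktop_environment_detection():
    assert _get_linux_desktop_environment({'XDG_CURRENT_DESKTOP': 'KDE'}) == _LinuxDesktopEnvironment.KDE
    assert _get_linux_desktop_environment({'DESKTOP_SESSION': 'mate'}) == _LinuxDesktopEnvironment.GNOME
    assert _get_linux_desktop_environment({}) == _LinuxDesktopEnvironment.OTHER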
def _choose_linux_keyring(logger):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_util_linux.cc
SelectBackend
"""
desktop_environment = _get_linux_desktop_environment(os.environ)
logger.debug(f'detected desktop environment: {desktop_environment.name}')
if desktop_environment == _LinuxDesktopEnvironment.KDE:
linux_keyring = _LinuxKeyring.KWALLET
elif desktop_environment == _LinuxDesktopEnvironment.OTHER:
linux_keyring = _LinuxKeyring.BASICTEXT
else:
linux_keyring = _LinuxKeyring.GNOMEKEYRING
return linux_keyring
def _get_kwallet_network_wallet(logger):
""" The name of the wallet used to store network passwords.
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/kwallet_dbus.cc
KWalletDBus::NetworkWallet
which does a dbus call to the following function:
https://api.kde.org/frameworks/kwallet/html/classKWallet_1_1Wallet.html
Wallet::NetworkWallet
"""
default_wallet = 'kdewallet'
try:
proc = Popen([
'dbus-send', '--session', '--print-reply=literal',
'--dest=org.kde.kwalletd5',
'/modules/kwalletd5',
'org.kde.KWallet.networkWallet'
], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
stdout, stderr = proc.communicate_or_kill()
if proc.returncode != 0:
logger.warning('failed to read NetworkWallet')
return default_wallet
else:
network_wallet = stdout.decode().strip()
logger.debug(f'NetworkWallet = "{network_wallet}"')
return network_wallet
except Exception as e:
logger.warning(f'exception while obtaining NetworkWallet: {e}')
return default_wallet
def _get_kwallet_password(browser_keyring_name, logger):
logger.debug('using kwallet-query to obtain password from kwallet')
if shutil.which('kwallet-query') is None:
        logger.error('kwallet-query command not found. KWallet and kwallet-query '
                     'must be installed to read from KWallet. kwallet-query should be '
                     'included in the kwallet package for your distribution')
return b''
network_wallet = _get_kwallet_network_wallet(logger)
try:
proc = Popen([
'kwallet-query',
'--read-password', f'{browser_keyring_name} Safe Storage',
'--folder', f'{browser_keyring_name} Keys',
network_wallet
], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
stdout, stderr = proc.communicate_or_kill()
if proc.returncode != 0:
logger.error(f'kwallet-query failed with return code {proc.returncode}. Please consult '
'the kwallet-query man page for details')
return b''
else:
if stdout.lower().startswith(b'failed to read'):
logger.debug('failed to read password from kwallet. Using empty string instead')
# this sometimes occurs in KDE because chrome does not check hasEntry and instead
# just tries to read the value (which kwallet returns "") whereas kwallet-query
# checks hasEntry. To verify this:
# dbus-monitor "interface='org.kde.KWallet'" "type=method_return"
# while starting chrome.
# this may be a bug as the intended behaviour is to generate a random password and store
# it, but that doesn't matter here.
return b''
else:
logger.debug('password found')
if stdout[-1:] == b'\n':
stdout = stdout[:-1]
return stdout
except Exception as e:
logger.warning(f'exception running kwallet-query: {error_to_str(e)}')
return b''
def _get_gnome_keyring_password(browser_keyring_name, logger):
if not secretstorage:
logger.error(f'secretstorage not available {_SECRETSTORAGE_UNAVAILABLE_REASON}')
return b''
    # The GNOME keyring does not seem to organise keys in the same way as KWallet.
    # Watching `dbus-monitor` during startup shows that Chromium lists all keys and
    # presumably searches for its own key in the list, so we must do the same.
# https://github.com/jaraco/keyring/issues/556
with contextlib.closing(secretstorage.dbus_init()) as con:
col = secretstorage.get_default_collection(con)
for item in col.get_all_items():
if item.get_label() == f'{browser_keyring_name} Safe Storage':
return item.get_secret()
else:
logger.error('failed to read from keyring')
return b''
def _get_linux_keyring_password(browser_keyring_name, keyring, logger):
# note: chrome/chromium can be run with the following flags to determine which keyring backend
# it has chosen to use
# chromium --enable-logging=stderr --v=1 2>&1 | grep key_storage_
# Chromium supports a flag: --password-store=<basic|gnome|kwallet> so the automatic detection
# will not be sufficient in all cases.
keyring = _LinuxKeyring[keyring] if keyring else _choose_linux_keyring(logger)
logger.debug(f'Chosen keyring: {keyring.name}')
if keyring == _LinuxKeyring.KWALLET:
return _get_kwallet_password(browser_keyring_name, logger)
elif keyring == _LinuxKeyring.GNOMEKEYRING:
return _get_gnome_keyring_password(browser_keyring_name, logger)
elif keyring == _LinuxKeyring.BASICTEXT:
# when basic text is chosen, all cookies are stored as v10 (so no keyring password is required)
return None
assert False, f'Unknown keyring {keyring}'
def _get_mac_keyring_password(browser_keyring_name, logger):
logger.debug('using find-generic-password to obtain password from OSX keychain')
try:
proc = Popen(
['security', 'find-generic-password',
'-w', # write password to stdout
'-a', browser_keyring_name, # match 'account'
'-s', f'{browser_keyring_name} Safe Storage'], # match 'service'
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
stdout, stderr = proc.communicate_or_kill()
if stdout[-1:] == b'\n':
stdout = stdout[:-1]
return stdout
except Exception as e:
logger.warning(f'exception running find-generic-password: {error_to_str(e)}')
return None
def _get_windows_v10_key(browser_root, logger):
path = _find_most_recently_used_file(browser_root, 'Local State', logger)
if path is None:
logger.error('could not find local state file')
return None
logger.debug(f'Found local state file at "{path}"')
with open(path, encoding='utf8') as f:
data = json.load(f)
try:
base64_key = data['os_crypt']['encrypted_key']
except KeyError:
logger.error('no encrypted key in Local State')
return None
encrypted_key = compat_b64decode(base64_key)
prefix = b'DPAPI'
if not encrypted_key.startswith(prefix):
logger.error('invalid key')
return None
return _decrypt_windows_dpapi(encrypted_key[len(prefix):], logger)
def pbkdf2_sha1(password, salt, iterations, key_length):
return pbkdf2_hmac('sha1', password, salt, iterations, key_length)
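# For reference, the key derivation used above is plain PBKDF2-HMAC-SHA1 and can be
# reproduced with the standard library alone. The password below is made up (real
# callers obtain it from the OS keyring), and this helper is illustrative only.
def _example_derive_v10_key():
    import hashlib
    password = b'example password'
    key = hashlib.pbkdf2_hmac('sha1', password, b'saltysalt', 1003, 16)
    assert key == pbkdf2_sha1(password, salt=b'saltysalt', iterations=1003, key_length=16)
    return key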
def _decrypt_aes_cbc(ciphertext, key, logger, initialization_vector=b' ' * 16):
plaintext = unpad_pkcs7(aes_cbc_decrypt_bytes(ciphertext, key, initialization_vector))
try:
return plaintext.decode()
except UnicodeDecodeError:
logger.warning('failed to decrypt cookie (AES-CBC) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True)
return None
def _decrypt_aes_gcm(ciphertext, key, nonce, authentication_tag, logger):
try:
plaintext = aes_gcm_decrypt_and_verify_bytes(ciphertext, key, authentication_tag, nonce)
except ValueError:
logger.warning('failed to decrypt cookie (AES-GCM) because the MAC check failed. Possibly the key is wrong?', only_once=True)
return None
try:
return plaintext.decode()
except UnicodeDecodeError:
logger.warning('failed to decrypt cookie (AES-GCM) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True)
return None
def _decrypt_windows_dpapi(ciphertext, logger):
"""
References:
- https://docs.microsoft.com/en-us/windows/win32/api/dpapi/nf-dpapi-cryptunprotectdata
"""
from ctypes.wintypes import DWORD
class DATA_BLOB(ctypes.Structure):
_fields_ = [('cbData', DWORD),
('pbData', ctypes.POINTER(ctypes.c_char))]
buffer = ctypes.create_string_buffer(ciphertext)
blob_in = DATA_BLOB(ctypes.sizeof(buffer), buffer)
blob_out = DATA_BLOB()
ret = ctypes.windll.crypt32.CryptUnprotectData(
ctypes.byref(blob_in), # pDataIn
None, # ppszDataDescr: human readable description of pDataIn
None, # pOptionalEntropy: salt?
None, # pvReserved: must be NULL
None, # pPromptStruct: information about prompts to display
0, # dwFlags
ctypes.byref(blob_out) # pDataOut
)
if not ret:
logger.warning('failed to decrypt with DPAPI', only_once=True)
return None
result = ctypes.string_at(blob_out.pbData, blob_out.cbData)
ctypes.windll.kernel32.LocalFree(blob_out.pbData)
return result
def _config_home():
return os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
def _open_database_copy(database_path, tmpdir):
# cannot open sqlite databases if they are already in use (e.g. by the browser)
database_copy_path = os.path.join(tmpdir, 'temporary.sqlite')
shutil.copy(database_path, database_copy_path)
conn = sqlite3.connect(database_copy_path)
return conn.cursor()
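# Sketch of how _open_database_copy is used: because browsers keep their cookie
# databases locked, queries always run against a throwaway copy. Everything below uses
# temporary paths, not a real browser profile, and the helper name is illustrative only.
def _example_open_database_copy():
    import sqlite3
    import tempfile
    with tempfile.TemporaryDirectory() as tmpdir:
        original = os.path.join(tmpdir, 'original.sqlite')
        conn = sqlite3.connect(original)
        conn.execute('CREATE TABLE cookies (name TEXT)')
        conn.commit()
        conn.close()
        cursor = _open_database_copy(original, tmpdir)
        rows = cursor.execute('SELECT name FROM cookies').fetchall()  # queries hit the copy
        cursor.connection.close()
        return rows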
def _get_column_names(cursor, table_name):
table_info = cursor.execute(f'PRAGMA table_info({table_name})').fetchall()
return [row[1].decode() for row in table_info]
def _find_most_recently_used_file(root, filename, logger):
# if there are multiple browser profiles, take the most recently used one
i, paths = 0, []
with _create_progress_bar(logger) as progress_bar:
for curr_root, dirs, files in os.walk(root):
for file in files:
i += 1
progress_bar.print(f'Searching for "{filename}": {i: 6d} files searched')
if file == filename:
paths.append(os.path.join(curr_root, file))
return None if not paths else max(paths, key=lambda path: os.lstat(path).st_mtime)
def _merge_cookie_jars(jars):
output_jar = YoutubeDLCookieJar()
for jar in jars:
for cookie in jar:
output_jar.set_cookie(cookie)
if jar.filename is not None:
output_jar.filename = jar.filename
return output_jar
def _is_path(value):
return os.path.sep in value
def _parse_browser_specification(browser_name, profile=None, keyring=None):
if browser_name not in SUPPORTED_BROWSERS:
raise ValueError(f'unsupported browser: "{browser_name}"')
if keyring not in (None, *SUPPORTED_KEYRINGS):
raise ValueError(f'unsupported keyring: "{keyring}"')
if profile is not None and _is_path(profile):
profile = os.path.expanduser(profile)
return browser_name, profile, keyring
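# Small usage sketch (assumes 'chrome' is among SUPPORTED_BROWSERS, which is defined
# earlier in this module; the keyring name comes from SUPPORTED_KEYRINGS above, and the
# helper name is illustrative only):
def _example_parse_browser_specification():
    parsed = _parse_browser_specification('chrome', keyring='KWALLET')
    assert parsed == ('chrome', None, 'KWALLET')
    return parsed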
|
the-stack_0_13140 | import os
import glob
import pandas as pd
game_files = glob.glob(os.path.join(os.getcwd(), 'games', '*.EVE'))
game_files.sort()
game_frames = []
for game_file in game_files:
game_frame = pd.read_csv(game_file, names=[
'type', 'multi2', 'multi3', 'multi4', 'multi5', 'multi6', 'event'])
game_frames.append(game_frame)
games = pd.concat(game_frames)
games.loc[games['multi5'] == '??', ['multi5']] = ''
identifiers = games['multi2'].str.extract(r'(.LS(\d{4})\d{5})')
identifiers = identifiers.fillna(method='ffill')
identifiers.columns = ['game_id', 'year']
games = pd.concat([games, identifiers], axis=1, sort=False)
games = games.fillna(' ')
games['type'] = pd.Categorical(games['type'])  # direct assignment avoids chained-indexing, which may silently not modify the frame
|
the-stack_0_13141 | from collections import OrderedDict
from django.conf import settings
from systems.models.base import BaseModel
from utility.data import Collection, ensure_list, flatten, clean_dict, normalize_value, format_value, prioritize, dump_json
import re
import copy
import yaml
import logging
logger = logging.getLogger(__name__)
noalias_dumper = yaml.dumper.SafeDumper
noalias_dumper.ignore_aliases = lambda self, data: True
class BaseProfileComponent(object):
def __init__(self, name, profile):
self.name = name
self.profile = profile
self.command = profile.command
self.manager = self.command.manager
def priority(self):
return 10
def ensure_module_config(self):
# Override in subclass if needed
return False
def get_names(self, relation):
return [ getattr(x, x.facade.key()) for x in relation.all() ]
def get_info(self, name, config):
return self.profile.get_info(name, config)
def pop_info(self, name, config):
return self.profile.pop_info(name, config)
def get_value(self, name, config):
return self.profile.get_value(name, config)
def pop_value(self, name, config):
return self.profile.pop_value(name, config)
def get_values(self, name, config):
return self.profile.get_values(name, config)
def pop_values(self, name, config):
return self.profile.pop_values(name, config)
def interpolate(self, config, **replacements):
return self.profile.interpolate(config, replacements)
def get_variables(self, instance, variables = None):
if not variables:
variables = {}
return self.profile.get_variables(instance, variables)
def exec(self, command, **parameters):
return self.command.exec_local(command, parameters)
def run_list(self, elements, processor):
return self.command.run_list(elements, processor)
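# A minimal sketch of what a concrete profile component can look like. Real components
# are discovered through the manager's component index rather than defined inline; the
# class below is illustrative only (its name and behaviour are not part of the
# framework) and simply shows the hooks CommandProfile drives.
class _ExampleProfileComponent(BaseProfileComponent):
    def priority(self):
        # components are processed in ascending priority order
        return 5
    def ensure_module_config(self):
        # ask the profile to stamp each instance with its originating '_module'
        return True
    def run(self, name, config):
        # called once per expanded, included instance in the profile data
        self.command.info("running example instance: {}".format(name))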
class CommandProfile(object):
def __init__(self, module, name = None, data = None):
if not data:
data = {}
self.name = name
self.module = module
self.command = module.command
self.manager = self.command.manager
self.data = data
self.components = []
self.config = Collection()
def get_component_names(self, filter_method = None):
return self.manager.index.load_component_names(self, filter_method)
def initialize(self, config, components):
self.components = components if components else []
if not config:
config = {}
self.init_config(config)
self.load_parents()
self.data = self.get_schema()
def init_config(self, dynamic_config):
self.command.options.initialize(True)
for stored_config in self.command.get_instances(self.command._config):
self.config.set(stored_config.name, stored_config.value)
if isinstance(dynamic_config, dict):
for name, value in dynamic_config.items():
self.config.set(name, value)
def get_config(self):
return self.data.get('config', {})
def set_config(self, config):
if 'config' not in self.data:
self.data['config'] = {}
for name, value in self.interpolate_config(config).items():
self.data['config'][name] = value
def interpolate_config(self, input_config, **options):
config = {}
for name, value in input_config.items():
config[name] = self.interpolate_config_value(value, **options)
if not self.config.check(name):
self.config.set(name, config[name])
return config
def interpolate_config_value(self, value, **options):
options['config_overrides'] = self.config.export()
return normalize_value(self.command.options.interpolate(value, **options))
def load_parents(self):
self.parents = []
self.set_config(self.get_config())
if 'parents' in self.data:
parents = self.data.pop('parents')
for parent in reversed(ensure_list(parents)):
module = self.module.instance
if isinstance(parent, str):
profile_name = self.interpolate_config_value(parent)
else:
profile_name = self.interpolate_config_value(parent['profile'])
if 'module' in parent:
module_name = self.interpolate_config_value(parent['module'])
if module_name != 'self':
module = self.get_module(module_name)
self.parents.insert(0,
module.provider.get_profile(profile_name)
)
for profile in reversed(self.parents):
profile.load_parents()
def get_schema(self):
schema = {'config': {}}
for profile in self.parents:
parent_schema = profile.get_schema()
self.merge_schema(schema, parent_schema)
self.merge_schema(schema, self.data)
for component in self.get_component_names('ensure_module_config'):
if component in schema:
for name, component_config in schema[component].items():
if '_module' not in component_config:
component_config['_module'] = self.module.instance.name
for name, value in schema['config'].items():
if not self.config.check(name):
self.config.set(name, value)
return schema
def merge_schema(self, schema, data):
for key, value in data.items():
if isinstance(value, dict):
schema.setdefault(key, {})
self.merge_schema(schema[key], value)
else:
schema[key] = value
def display_schema(self, operation):
self.command.info('')
self.process_components(operation, display_only = True)
if self.include('profile'):
component = self.manager.index.load_component(self, 'profile')
profiles = self.expand_instances(component.name, self.data)
for profile, config in profiles.items():
if self.include_instance(profile, config):
getattr(component, operation)(profile, config, True)
def run(self, components = None, config = None, display_only = False, test = False):
self.command.data("Running profile:", "{}:{}".format(self.module.instance.name, self.name), 'profile_name')
operation = 'run'
self.initialize(config, components)
if display_only:
self.display_schema(operation)
else:
self.process_components(operation,
extra_config = { 'test': test }
)
def destroy(self, components = None, config = None, display_only = False):
self.command.data("Destroying profile:", "{}:{}".format(self.module.instance.name, self.name), 'profile_name')
def remove_instance(instance_config):
return not instance_config.get('_keep', False)
operation = 'destroy'
self.initialize(config, components)
if display_only:
self.display_schema(operation)
else:
self.process_components(operation, include_method = remove_instance)
def process_components(self, operation, include_method = None, display_only = False, extra_config = None):
component_map = self.manager.index.load_components(self)
for priority, components in sorted(component_map.items()):
def process(component):
operation_method = getattr(component, operation, None)
if callable(operation_method) and self.include(component.name):
if extra_config and isinstance(extra_config, dict):
for property, value in extra_config.items():
setattr(component, property, value)
self._process_component_instances(component,
component_method = operation_method,
include_method = include_method,
display_only = display_only
)
self.command.run_list(components, process)
def _process_component_instances(self, component, component_method, include_method = None, display_only = False):
data = copy.deepcopy(self.data)
requirements = Collection()
processed = Collection()
rendered_instances = OrderedDict() if display_only else None
def get_wait_keys(_name):
wait_keys = []
if _name in requirements and requirements[_name]:
for _child_name in flatten(ensure_list(requirements[_name])):
if processed[_child_name]:
wait_keys.extend(processed[_child_name])
wait_keys.extend(get_wait_keys(_child_name))
return list(set(wait_keys))
def check_include(config):
if not callable(include_method):
return True
return include_method(self.interpolate_config_value(config))
def render_instance(name):
instance_config = copy.deepcopy(data[component.name][name])
name = self.interpolate_config_value(name)
instance_config = self.interpolate_config_value(instance_config,
config = 'query',
config_value = False,
                function_suppress = r'^\s*\<+[^\>]+\>+\s*$',
                conditional_suppress = r'\s*\<+[^\>]+\>+\s*'
)
if self.include_instance(name, instance_config):
if '_config' in instance_config:
instance_config = self.interpolate_config_value(instance_config,
                    function_suppress = r'^\s*\<+[^\>]+\>+\s*$',
                    conditional_suppress = r'\s*\<+[^\>]+\>+\s*'
)
component_method(name, instance_config)
rendered_instances[name] = instance_config
def process_instances(interpolate_references):
instance_map = self.order_instances(self.expand_instances(component.name, data,
interpolate_references = interpolate_references
))
for priority, names in sorted(instance_map.items()):
expansion = Collection()
def process_instance(name):
instance_config = copy.deepcopy(data[component.name][name])
name = self.interpolate_config_value(name)
if self.include_instance(name, instance_config):
if isinstance(instance_config, dict):
if '_foreach' in instance_config:
expansion[priority] = True
if priority not in expansion and \
name not in processed and \
check_include(instance_config):
instance_config = self.interpolate_config_value(instance_config)
if isinstance(instance_config, dict):
requirements[name] = instance_config.pop('_requires', [])
if requirements[name]:
instance_config['_wait_keys'] = get_wait_keys(name)
if settings.DEBUG_COMMAND_PROFILES:
self.command.info(yaml.dump(
{ name: instance_config },
Dumper = noalias_dumper
))
log_keys = component_method(name, instance_config)
processed[name] = ensure_list(log_keys) if log_keys else []
if display_only:
self.command.run_list(names, render_instance)
else:
self.command.run_list(names, process_instance)
if not display_only and priority in expansion:
return process_instances(True)
if display_only:
process_instances(True)
self.command.info(yaml.dump(
{ component.name: rendered_instances },
Dumper = noalias_dumper
))
else:
process_instances(False)
self.command.wait_for_tasks([ log_keys for name, log_keys in processed.export().items() ])
def expand_instances(self, component_name, data = None, interpolate_references = True):
instance_data = copy.deepcopy(self.data if data is None else data)
instance_map = {}
def get_replacements(info, replacements, keys = None):
if keys is None:
keys = []
tag = ".".join(keys) if keys else 'value'
if isinstance(info, dict):
replacements["<<{}>>".format(tag)] = info
replacements["<<>{}>>".format(tag)] = dump_json(info)
for key, value in info.items():
get_replacements(value, replacements, keys + [str(key)])
elif isinstance(info, (list, tuple)):
replacements["<<{}>>".format(tag)] = info
replacements["<<>{}>>".format(tag)] = dump_json(info)
for index, value in enumerate(info):
get_replacements(value, replacements, keys + [str(index)])
else:
replacements["<<{}>>".format(tag)] = info
return replacements
def substitute_config(config, replacements):
if isinstance(config, dict):
config = copy.deepcopy(config)
for key in list(config.keys()):
real_key = substitute_config(key, replacements)
real_value = substitute_config(config[key], replacements)
if isinstance(real_key, (dict, list, tuple)) or real_key != key:
config.pop(key, None)
if isinstance(real_key, dict):
for sub_key, sub_value in real_key.items():
config[sub_key] = sub_value if sub_value is not None else real_value
elif isinstance(real_key, (list, tuple)):
for sub_key in real_key:
config[sub_key] = real_value
else:
config[real_key] = real_value
elif isinstance(config, (list, tuple)):
config = copy.deepcopy(config)
for index, value in enumerate(config):
config[index] = substitute_config(value, replacements)
else:
for token in replacements.keys():
if str(config) == token:
config = replacements[token]
else:
replacement = replacements[token]
if isinstance(replacements[token], (list, tuple, dict)):
replacement = dump_json(replacements[token])
if isinstance(config, str):
config = config.replace(token, str(replacement))
if isinstance(config, str) and re.match(r'^\<\<.*\>\>$', config):
config = None
return config
for name, config in instance_data[component_name].items():
if config and isinstance(config, dict):
collection = config.get('_foreach', None)
if collection and (interpolate_references or not isinstance(collection, str) or not collection.startswith('&')):
config.pop('_foreach')
collection = self.interpolate_config_value(collection)
if isinstance(collection, (list, tuple)):
for item in collection:
replacements = get_replacements(item, {})
new_name = self.interpolate_config_value(substitute_config(name, replacements))
instance_map[new_name] = substitute_config(config, replacements)
elif isinstance(collection, dict):
for key, item in collection.items():
replacements = get_replacements(item, {
"<<dict_key>>": key
})
new_name = self.interpolate_config_value(substitute_config(name, replacements))
instance_map[new_name] = substitute_config(config, replacements)
else:
self.command.error("Component instance expansions must be lists or dictionaries: {}".format(collection))
else:
instance_map[name] = config
else:
instance_map[name] = config
for name, config in instance_map.items():
if data is None:
self.data[component_name][name] = config
else:
data[component_name][name] = config
return instance_map
def order_instances(self, configs):
for name, value in configs.items():
if isinstance(value, dict) and '_requires' in value and value['_requires'] is not None:
value['_requires'] = self.interpolate_config_value(value['_requires'])
return prioritize(configs, keep_requires = True, requires_field = '_requires')
def include(self, component, force = False, check_data = True):
if component == 'profile' and 'profile' in self.data:
return True
if not force and self.components and component not in self.components:
return False
if check_data and component not in self.data:
return False
return True
def include_inner(self, component, force = False):
return self.include(component,
force = force,
check_data = False
)
def include_instance(self, name, config):
if isinstance(config, dict):
when = config.pop('_when', None)
when_not = config.pop('_when_not', None)
when_in = config.pop('_when_in', None)
when_not_in = config.pop('_when_not_in', None)
when_type = config.pop('_when_type', 'AND').upper()
if when is not None:
result = True if when_type == 'AND' else False
for variable in ensure_list(when):
value = format_value('bool', self.interpolate_config_value(variable))
if when_type == 'AND':
if not value:
return False
else:
if value:
result = True
return result
if when_not is not None:
result = True if when_type == 'AND' else False
for variable in ensure_list(when_not):
value = format_value('bool', self.interpolate_config_value(variable))
if when_type == 'AND':
if value:
return False
else:
if not value:
result = True
return result
if when_in is not None:
value = self.interpolate_config_value(when_in)
return name in ensure_list(value)
if when_not_in is not None:
value = self.interpolate_config_value(when_not_in)
return name not in ensure_list(value)
return True
def get_variables(self, instance, variables = None):
if not variables:
variables = {}
system_fields = [ x.name for x in instance.facade.system_field_instances ]
if getattr(instance, 'config', None) and isinstance(instance.config, dict):
for name, value in instance.config.items():
variables[name] = value
for field in instance.facade.fields:
value = getattr(instance, field)
if not isinstance(value, BaseModel) and field[0] != '_' and field not in system_fields:
variables[field] = value
return clean_dict(variables)
def get_instances(self, facade_name, excludes = None):
if not excludes:
excludes = []
facade_index = self.manager.index.get_facade_index()
excludes = ensure_list(excludes)
instances = []
for instance in self.command.get_instances(facade_index[facade_name]):
if not excludes or instance.name not in excludes:
instances.append(instance)
return instances
def get_module(self, name):
facade = self.command.facade(self.command._module)
return self.command.get_instance(facade, name, required = False)
def get_info(self, name, config, remove = True):
if remove:
value = config.pop(name, None)
else:
value = config.get(name, None)
return value
def pop_info(self, name, config):
return self.get_info(name, config, True)
def get_value(self, name, config, remove = False):
value = self.get_info(name, config, remove)
if value is not None:
value = self.interpolate_config_value(value)
return value
def pop_value(self, name, config):
return self.get_value(name, config, True)
def get_values(self, name, config, remove = False):
value = self.get_value(name, config, remove)
return ensure_list(value) if value is not None else []
def pop_values(self, name, config):
return self.get_values(name, config, True)
def interpolate(self, config, replacements = None):
if not replacements:
replacements = {}
def _interpolate(data):
if isinstance(data, dict):
for key, value in data.items():
data[key] = _interpolate(value)
elif isinstance(data, (list, tuple)):
for index, value in enumerate(data):
data[index] = _interpolate(value)
elif isinstance(data, str):
data = re.sub(r"([\{\}])", r"\1\1", data)
data = re.sub(r"\<([a-z][\_\-a-z0-9]+)\>", r"{\1}", data)
data = data.format(**replacements)
return data
if replacements:
return _interpolate(copy.deepcopy(config))
return config
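# For orientation: a profile's schema (self.data) maps component sections to named
# instance configs, alongside the reserved 'config' and 'parents' keys. The sketch
# below is purely illustrative -- the 'server' component and its fields are
# hypothetical, while the '_'-prefixed directives are the ones interpreted by
# CommandProfile above:
#
#   {
#       'config': {'environment': 'dev'},
#       'parents': ['base'],
#       'server': {
#           'web-<<dict_key>>': {
#               '_foreach': {'a': {'port': 80}, 'b': {'port': 81}},  # expands to web-a / web-b
#               '_requires': ['network'],   # processed after 'network', waits on its log keys
#               '_when': 'deploy_servers',  # interpolated and evaluated as a boolean
#               '_keep': True               # skipped by destroy()
#           }
#       }
#   }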
|
the-stack_0_13142 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import json
import os
import re
import subprocess
import tempfile
from . import utils
def get_verbose():
"""Return if in verbose mode."""
verbose = 0
for e in ["V", "VERBOSE"]:
if e not in os.environ:
continue
verbose = int(os.environ[e])
break
return verbose > 0
def get_yosys():
"""
Searches for the Yosys binary. If the env. var. "YOSYS" is set, then it
checks if it points to a valid executable binary. Otherwise it searches
in PATH for binaries named "yosys" and returns the first one found.
"""
def is_exe(fpath):
"""
Returns True if a file exists and is executable.
"""
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
# The environmental variable "YOSYS" is set. It should point to the Yosys
# executable.
if "YOSYS" in os.environ:
fpath = os.environ["YOSYS"]
if not is_exe(fpath):
return None
return fpath
# Look for the 'yosys' binary in the current PATH but only if the PATH
# variable is available.
elif "PATH" in os.environ:
for path in os.environ["PATH"].split(os.pathsep):
fpath = os.path.join(path, "yosys")
if is_exe(fpath):
return fpath
# Couldn't find Yosys.
return None
def determine_select_prefix():
"""
Older and newer versions of Yosys exhibit different behavior of the
    'select' command regarding black/white boxes. Newer versions require a
prefix before some queries. This function determines whether the prefix
is required or not.
"""
# Query help string of the select command
cmd = ["-p", "help select"]
stdout = get_output(cmd, no_common_args=True)
# Look for the phrase. If found then the prefix is required
PHRASE = "prefix the pattern with '='"
if PHRASE in stdout:
return "="
# No prefix needed
return ""
def get_yosys_common_args():
return ["-e", "wire '[^']*' is assigned in a block", "-q"]
def get_output(params, no_common_args=False):
"""Run Yosys with given command line parameters, and return
stdout as a string. Raises CalledProcessError on a non-zero exit code."""
verbose = get_verbose()
cmd = [get_yosys()]
if not no_common_args:
cmd += get_yosys_common_args()
cmd += params
if verbose:
msg = ""
msg += "command".ljust(9).ljust(80, "=") + "\n"
msg += str(cmd)
print(msg)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Get the output
stdout, stderr = p.communicate()
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
retcode = p.wait()
if verbose:
msg = ""
if len(stdout):
msg += "stdout".ljust(9).ljust(80, "=") + "\n"
msg += stdout
if len(stderr):
msg += "stderr".ljust(9).ljust(80, "=") + "\n"
msg += stderr
msg += "exitcode".ljust(9).ljust(80, "=") + "\n"
msg += "{}\n".format(retcode)
msg += "=" * 80 + "\n"
print(msg)
if retcode != 0:
emsg = ""
emsg += "Yosys failed with exit code {}\n".format(retcode)
emsg += "Command: '{}'\n".format(" ".join(cmd))
emsg += "Message:\n"
emsg += "\n".join([" " + v for v in stderr.splitlines()])
raise subprocess.CalledProcessError(retcode, cmd, emsg)
return stdout
defines = []
includes = []
def add_define(defname):
"""Add a Verilog define to the list of defines to set in Yosys"""
defines.append(defname)
def get_defines():
"""Return a list of set Verilog defines, as a list of arguments
to pass to Yosys `read_verilog`"""
return " ".join(["-D" + _ for _ in defines])
def add_include(path):
""" Add a path to search when reading verilog to the list of
includes set in Yosys"""
includes.append(path)
def get_includes():
"""Return a list of include directories, as a list of arguments
to pass to Yosys `read_verilog`"""
return " ".join(["-I" + _ for _ in includes])
def commands(commands, infiles=[]):
"""Run a given string containing Yosys commands
Inputs
-------
commands : string of Yosys commands to run
infiles : list of input files
"""
commands = "read_verilog {} {} {}; ".format(
get_defines(), get_includes(), " ".join(infiles)
) + commands
params = ["-p", commands]
return get_output(params)
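# Example (requires a Yosys binary on PATH or via $YOSYS; the source file name is
# made up):
#
#   add_define("SYNTHESIS")
#   print(commands("synth; stat", ["counter.v"]))
#
# Defines and include paths registered via add_define()/add_include() are folded into
# the generated read_verilog invocation.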
def script(script, infiles=[]):
"""Run a Yosys script given a path to the script
Inputs
-------
script : path to Yosys script to run
infiles : list of input files
"""
params = ["-s", script] + infiles
return get_output(params)
def vlog_to_json(
infiles, flatten=False, aig=False, mode=None, module_with_mode=None
):
"""
Convert Verilog to a JSON representation using Yosys
Inputs
-------
infiles : list of input files
flatten : set to flatten output hierarchy
aig : generate And-Inverter-Graph modules for gates
mode : set to a value other than None to use `chparam` to
set the value of the MODE parameter
module_with_mode : the name of the module to apply `mode` to
"""
prep_opts = "-flatten" if flatten else ""
json_opts = "-aig" if aig else ""
if mode is not None:
mode_str = 'chparam -set MODE "{}" {}; '.format(mode, module_with_mode)
else:
mode_str = ""
cmds = "{}prep {}; write_json {}".format(mode_str, prep_opts, json_opts)
try:
j = utils.strip_yosys_json(commands(cmds, infiles))
except subprocess.CalledProcessError as ex:
print(ex.output)
exit(-1)
return json.loads(j)
def extract_pin(module, pstr, _regex=re.compile(r"([^/]+)/([^/]+)")):
"""
Extract the pin from a line of the result of a Yosys select command, or
None if the command result is irrelevant (e.g. does not correspond to the
correct module)
Inputs
-------
module: Name of module to extract pins from
pstr: Line from Yosys select command (`module/pin` format)
"""
m = re.match(r"([^/]+)/([^/]+)", pstr)
if m and m.group(1) == module:
return m.group(2)
else:
return None
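# extract_pin is a pure string helper, e.g.:
#   extract_pin("alu", "alu/carry_out")  ->  "carry_out"
#   extract_pin("alu", "fpu/carry_out")  ->  None  (different module, line is ignored)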
def do_select(infiles, module, expr, prep=False, flatten=False):
"""
Run a Yosys select command (given the expression and input files)
on a module and return the result as a list of pins
Inputs
-------
infiles: List of Verilog source files to pass to Yosys
module: Name of module to run command on
expr: Yosys selector expression for select command
prep: Run prep command before selecting.
flatten: Flatten module when running prep.
"""
# TODO: All of these functions involve a fairly large number of calls to
# Yosys. Although performance here is unlikely to be a major priority any
# time soon, it might be worth investigating better options?
f = ""
if flatten:
f = "-flatten"
p = ""
if prep:
p = "prep -top {} {};".format(module, f)
else:
p = "proc;"
outfile = tempfile.mktemp()
sel_cmd = "{} cd {}; select -write {} {}".format(p, module, outfile, expr)
try:
commands(sel_cmd, infiles)
except subprocess.CalledProcessError as ex:
print(ex.output)
exit(-1)
pins = []
with open(outfile, 'r') as f:
for net in f:
snet = net.strip()
if (len(snet) > 0):
pin = extract_pin(module, snet)
if pin is not None:
pins.append(pin)
os.remove(outfile)
return pins
def get_combinational_sinks(infiles, module, innet):
"""Return a list of output ports which are combinational sinks of a given
input.
Inputs
-------
infiles: List of Verilog source files to pass to Yosys
module: Name of module to run command on
innet: Name of input net to find sinks of
"""
return do_select(
infiles, module, "={} %co* =o:* %i ={} %d".format(innet, innet)
)
def list_clocks(infiles, module):
"""Return a list of clocks in the module
Inputs
-------
infiles: List of Verilog source files to pass to Yosys
module: Name of module to run command on
"""
return do_select(
infiles, module,
"=c:* %x:+[CLK]:+[clk]:+[clock]:+[CLOCK] =c:* %d =x:* %i"
)
def get_clock_assoc_signals(infiles, module, clk):
"""Return the list of signals associated with a given clock.
Inputs
-------
infiles: List of Verilog source files to pass to Yosys
module: Name of module to run command on
clk: Name of clock to find associated signals
"""
return do_select(
infiles, module,
"select -list ={} %a %co* %x =i:* =o:* %u %i =a:ASSOC_CLOCK={} %u ={} "
"%d".
format(clk, clk, clk)
)
# Find things which affect the given output
# show w:*D_IN_0 %a %ci*
# Find things which are affected by the given clock.
# show w:*INPUT_CLK %a %co*
# Find things which are affect by the given signal - combinational only.
# select -list w:*INPUT_CLK %a %co* %x x:* %i
def get_related_output_for_input(infiles, module, signal):
""".
Inputs
-------
infiles: List of Verilog source files to pass to Yosys
module: Name of module to run command on
clk: Name of clock to find associated signals
"""
return do_select(
infiles, module, "select -list =w:*{} %a %co* =o:* %i".format(signal)
)
def get_related_inputs_for_input(infiles, module, signal):
""".
Inputs
-------
infiles: List of Verilog source files to pass to Yosys
module: Name of module to run command on
clk: Name of clock to find associated signals
"""
return [
x for x in do_select(
infiles, module,
"select -list =w:*{} %a %co* %x =i:* %i".format(signal)
) if x != signal
]
|
the-stack_0_13144 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
from functools import partial
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
import numpy as np
class TestSplitOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.Host, [PrecisionType.FP32, PrecisionType.INT64],
DataLayoutType.NCHW,
thread=[1, 4])
opencl_places = [
Place(TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
Place(TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
Place(TargetType.Host, PrecisionType.FP32)
]
self.enable_testing_on_place(places=opencl_places)
metal_places = [
Place(TargetType.Metal, PrecisionType.FP32,
DataLayoutType.MetalTexture2DArray),
Place(TargetType.Metal, PrecisionType.FP16,
DataLayoutType.MetalTexture2DArray),
Place(TargetType.ARM, PrecisionType.FP32),
Place(TargetType.Host, PrecisionType.FP32)
]
self.enable_testing_on_place(places=metal_places)
self.enable_testing_on_place(TargetType.NNAdapter, PrecisionType.FP32)
self.enable_devices_on_nnadapter(device_names=[
"kunlunxin_xtcl", "nvidia_tensorrt", "intel_openvino"
])
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
x_dtype = program_config.inputs["input_data"].dtype
#check config
if predictor_config.precision() == PrecisionType.INT64:
if x_dtype != np.int64:
return False
return True
def sample_program_configs(self, draw):
in_shape = draw(
st.sampled_from([[6, 9, 24], [6, 24, 24], [6, 24], [24, 24], [24]
]))
batch = draw(st.integers(min_value=1, max_value=10))
in_shape.insert(0, batch)
sections = draw(
st.sampled_from([[], [3, 3], [2, 4], [10, 14], [2, 2, 2],
[1, 3, 2], [3, 3, 3], [3, 7, 14]]))
input_num = draw(st.sampled_from([0, 1]))
num = draw(st.sampled_from([0, 2, 3]))
input_axis = draw(st.sampled_from([0, 1, 2, 3]))
input_type = draw(st.sampled_from(["float32", "int32", "int64"]))
Out = draw(
st.sampled_from([["output_var0", "output_var1"],
["output_var0", "output_var1", "output_var2"]]))
#Sections and num cannot both be equal to 0.
assume((num != 0 and len(sections) == 0) or (num == 0 and
len(sections) != 0))
# the dimensions of input and axis match
assume(input_axis < len(in_shape))
#When sections and num are not both equal to 0, sections has higher priority.
#The sum of sections should be equal to the input size.
if len(sections) != 0:
assume(len(Out) == len(sections))
assume(in_shape[input_axis] % len(sections) == 0)
sum = 0
for i in sections:
sum += i
assume(sum == in_shape[input_axis])
if num != 0:
assume(len(Out) == num)
assume(in_shape[input_axis] % num == 0)
if input_num == 0:
assume((len(in_shape) == 2) & (in_shape[1] == 24) & (
sections == [10, 14]) & (len(Out) == 2))
def generate_input(*args, **kwargs):
if input_type == "float32":
return np.random.normal(0.0, 1.0, in_shape).astype(np.float32)
elif input_type == "int32":
return np.random.normal(0.0, 1.0, in_shape).astype(np.int32)
elif input_type == "int64":
return np.random.normal(0.0, 1.0, in_shape).astype(np.int64)
def generate_AxisTensor(*args, **kwargs):
return np.ones([1]).astype(np.int32)
def generate_SectionsTensorList1(*args, **kwargs):
return np.array([10]).astype(np.int32)
def generate_SectionsTensorList2(*args, **kwargs):
return np.array([14]).astype(np.int32)
dics_intput = [{
"X": ["input_data"],
"AxisTensor": ["AxisTensor"],
"SectionsTensorList":
["SectionsTensorList1", "SectionsTensorList2"]
}, {
"X": ["input_data"]
}]
dics_weight = [{
"AxisTensor": TensorConfig(data_gen=partial(generate_AxisTensor)),
"SectionsTensorList1":
TensorConfig(data_gen=partial(generate_SectionsTensorList1)),
"SectionsTensorList2":
TensorConfig(data_gen=partial(generate_SectionsTensorList2))
}, {}]
ops_config = OpConfig(
type="split",
inputs=dics_intput[input_num],
outputs={"Out": Out},
attrs={"sections": sections,
"num": num,
"axis": input_axis})
program_config = ProgramConfig(
ops=[ops_config],
weights=dics_weight[input_num],
inputs={
"input_data": TensorConfig(data_gen=partial(generate_input))
},
outputs=Out)
return program_config
def sample_predictor_configs(self):
atol, rtol = 1e-5, 1e-5
config_lists = self.get_predictor_configs()
for config in config_lists:
if config.target() in [TargetType.Metal]:
atol, rtol = 1e-3, 1e-3
return self.get_predictor_configs(), ["split"], (atol, rtol)
def add_ignore_pass_case(self):
def teller1(program_config, predictor_config):
x_shape = list(program_config.inputs["input_data"].shape)
if predictor_config.target() == TargetType.Metal:
if len(x_shape) != 4:
return True
self.add_ignore_check_case(
teller1, IgnoreReasons.ACCURACY_ERROR,
"The op output has diff in a specific case. We need to fix it as soon as possible."
)
def teller2(program_config, predictor_config):
x_dtype = program_config.inputs["input_data"].dtype
x_shape = list(program_config.inputs["input_data"].shape)
out_shape = list(program_config.outputs)
axis = program_config.ops[0].attrs["axis"]
num = program_config.ops[0].attrs["num"]
if predictor_config.target() == TargetType.OpenCL:
if num != 2 or x_dtype != np.float32:
return True
if predictor_config.target() == TargetType.Metal:
if len(x_shape) == 2 or axis == 0 or axis == 1:
return True
if x_dtype != np.float32:
return True
self.add_ignore_check_case(
teller2, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Lite does not support this op in a specific case. We need to fix it as soon as possible."
)
def _teller3(program_config, predictor_config):
if "nvidia_tensorrt" in self.get_nnadapter_device_name():
in_shape = program_config.inputs["input_data"].shape
axis = program_config.ops[0].attrs["axis"]
in_dtype = program_config.inputs["input_data"].dtype
if len(in_shape) == 1 or axis == 0 or in_dtype != np.float32:
return True
self.add_ignore_check_case(
_teller3, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Lite does not support 'in_shape_size == 1' or 'axis == 0' or 'in_dtype != float32' on NvidiaTensorrt."
)
def test(self, *args, **kwargs):
target_str = self.get_target()
max_examples = 50
if target_str == "OpenCL":
# Make sure to generate enough valid cases for OpenCL
max_examples = 100
if target_str == "Metal":
            # Make sure to generate enough valid cases for Metal
max_examples = 500
self.run_and_statis(
quant=False, min_success_num=25, max_examples=max_examples)
if __name__ == "__main__":
unittest.main(argv=[''])
|
the-stack_0_13146 | import asyncio
import json
import logging
import multiprocessing
import multiprocessing.context
import time
from collections import defaultdict
from pathlib import Path
from secrets import token_bytes
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
import aiosqlite
from blspy import G1Element, PrivateKey
from chia.consensus.coinbase import pool_parent_id, farmer_parent_id
from chia.consensus.constants import ConsensusConstants
from chia.pools.pool_puzzles import SINGLETON_LAUNCHER_HASH, solution_to_pool_state
from chia.pools.pool_wallet import PoolWallet
from chia.protocols import wallet_protocol
from chia.protocols.wallet_protocol import PuzzleSolutionResponse, RespondPuzzleSolution, CoinState
from chia.server.ws_connection import WSChiaConnection
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.coin_spend import CoinSpend
from chia.types.full_block import FullBlock
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
from chia.util.byte_types import hexstr_to_bytes
from chia.util.config import process_config_start_method
from chia.util.db_wrapper import DBWrapper
from chia.util.errors import Err
from chia.util.ints import uint32, uint64, uint128, uint8
from chia.util.db_synchronous import db_synchronous_on
from chia.wallet.cat_wallet.cat_utils import match_cat_puzzle, construct_cat_puzzle
from chia.wallet.cat_wallet.cat_wallet import CATWallet
from chia.wallet.cat_wallet.cat_constants import DEFAULT_CATS
from chia.wallet.derivation_record import DerivationRecord
from chia.wallet.derive_keys import master_sk_to_wallet_sk, master_sk_to_wallet_sk_unhardened
from chia.wallet.key_val_store import KeyValStore
from chia.wallet.puzzles.cat_loader import CAT_MOD
from chia.wallet.rl_wallet.rl_wallet import RLWallet
from chia.wallet.settings.user_settings import UserSettings
from chia.wallet.trade_manager import TradeManager
from chia.wallet.transaction_record import TransactionRecord
from chia.wallet.util.compute_hints import compute_coin_hints
from chia.wallet.util.transaction_type import TransactionType
from chia.wallet.util.wallet_sync_utils import last_change_height_cs
from chia.wallet.util.wallet_types import WalletType
from chia.wallet.wallet import Wallet
from chia.wallet.wallet_action import WalletAction
from chia.wallet.wallet_action_store import WalletActionStore
from chia.wallet.wallet_blockchain import WalletBlockchain
from chia.wallet.wallet_coin_record import WalletCoinRecord
from chia.wallet.wallet_coin_store import WalletCoinStore
from chia.wallet.wallet_info import WalletInfo
from chia.wallet.wallet_interested_store import WalletInterestedStore
from chia.wallet.wallet_pool_store import WalletPoolStore
from chia.wallet.wallet_puzzle_store import WalletPuzzleStore
from chia.wallet.wallet_sync_store import WalletSyncStore
from chia.wallet.wallet_transaction_store import WalletTransactionStore
from chia.wallet.wallet_user_store import WalletUserStore
from chia.server.server import ChiaServer
from chia.wallet.did_wallet.did_wallet import DIDWallet
from chia.wallet.wallet_weight_proof_handler import WalletWeightProofHandler
class WalletStateManager:
constants: ConsensusConstants
config: Dict
tx_store: WalletTransactionStore
puzzle_store: WalletPuzzleStore
user_store: WalletUserStore
action_store: WalletActionStore
basic_store: KeyValStore
start_index: int
# Makes sure only one asyncio thread is changing the blockchain state at one time
lock: asyncio.Lock
log: logging.Logger
# TODO Don't allow user to send tx until wallet is synced
sync_mode: bool
sync_target: uint32
genesis: FullBlock
state_changed_callback: Optional[Callable]
pending_tx_callback: Optional[Callable]
puzzle_hash_created_callbacks: Dict = defaultdict(lambda *x: None)
db_path: Path
db_connection: aiosqlite.Connection
db_wrapper: DBWrapper
main_wallet: Wallet
wallets: Dict[uint32, Any]
private_key: PrivateKey
trade_manager: TradeManager
new_wallet: bool
user_settings: UserSettings
blockchain: WalletBlockchain
coin_store: WalletCoinStore
sync_store: WalletSyncStore
finished_sync_up_to: uint32
interested_store: WalletInterestedStore
multiprocessing_context: multiprocessing.context.BaseContext
weight_proof_handler: WalletWeightProofHandler
server: ChiaServer
root_path: Path
wallet_node: Any
pool_store: WalletPoolStore
default_cats: Dict[str, Any]
@staticmethod
async def create(
private_key: PrivateKey,
config: Dict,
db_path: Path,
constants: ConsensusConstants,
server: ChiaServer,
root_path: Path,
wallet_node,
name: str = None,
):
self = WalletStateManager()
self.new_wallet = False
self.config = config
self.constants = constants
self.server = server
self.root_path = root_path
self.log = logging.getLogger(name if name else __name__)
self.lock = asyncio.Lock()
self.log.debug(f"Starting in db path: {db_path}")
self.db_connection = await aiosqlite.connect(db_path)
await self.db_connection.execute("pragma journal_mode=wal")
await self.db_connection.execute(
"pragma synchronous={}".format(db_synchronous_on(self.config.get("db_sync", "auto"), db_path))
)
self.db_wrapper = DBWrapper(self.db_connection)
self.coin_store = await WalletCoinStore.create(self.db_wrapper)
self.tx_store = await WalletTransactionStore.create(self.db_wrapper)
self.puzzle_store = await WalletPuzzleStore.create(self.db_wrapper)
self.user_store = await WalletUserStore.create(self.db_wrapper)
self.action_store = await WalletActionStore.create(self.db_wrapper)
self.basic_store = await KeyValStore.create(self.db_wrapper)
self.trade_manager = await TradeManager.create(self, self.db_wrapper)
self.user_settings = await UserSettings.create(self.basic_store)
self.pool_store = await WalletPoolStore.create(self.db_wrapper)
self.interested_store = await WalletInterestedStore.create(self.db_wrapper)
self.default_cats = DEFAULT_CATS
self.wallet_node = wallet_node
self.sync_mode = False
self.sync_target = uint32(0)
self.finished_sync_up_to = uint32(0)
multiprocessing_start_method = process_config_start_method(config=self.config, log=self.log)
self.multiprocessing_context = multiprocessing.get_context(method=multiprocessing_start_method)
self.weight_proof_handler = WalletWeightProofHandler(
constants=self.constants,
multiprocessing_context=self.multiprocessing_context,
)
self.blockchain = await WalletBlockchain.create(self.basic_store, self.constants, self.weight_proof_handler)
self.state_changed_callback = None
self.pending_tx_callback = None
self.db_path = db_path
main_wallet_info = await self.user_store.get_wallet_by_id(1)
assert main_wallet_info is not None
self.private_key = private_key
self.main_wallet = await Wallet.create(self, main_wallet_info)
self.wallets = {main_wallet_info.id: self.main_wallet}
wallet = None
for wallet_info in await self.get_all_wallet_info_entries():
if wallet_info.type == WalletType.STANDARD_WALLET:
if wallet_info.id == 1:
continue
wallet = await Wallet.create(self, wallet_info)
elif wallet_info.type == WalletType.CAT:
wallet = await CATWallet.create(
self,
self.main_wallet,
wallet_info,
)
elif wallet_info.type == WalletType.RATE_LIMITED:
wallet = await RLWallet.create(self, wallet_info)
elif wallet_info.type == WalletType.DISTRIBUTED_ID:
wallet = await DIDWallet.create(
self,
self.main_wallet,
wallet_info,
)
elif wallet_info.type == WalletType.POOLING_WALLET:
wallet = await PoolWallet.create_from_db(
self,
self.main_wallet,
wallet_info,
)
if wallet is not None:
self.wallets[wallet_info.id] = wallet
return self
def get_derivation_index(self, pubkey: G1Element, max_depth: int = 1000) -> int:
for i in range(0, max_depth):
derived = self.get_public_key(uint32(i))
if derived == pubkey:
return i
derived = self.get_public_key_unhardened(uint32(i))
if derived == pubkey:
return i
return -1
def get_public_key(self, index: uint32) -> G1Element:
return master_sk_to_wallet_sk(self.private_key, index).get_g1()
def get_public_key_unhardened(self, index: uint32) -> G1Element:
return master_sk_to_wallet_sk_unhardened(self.private_key, index).get_g1()
async def get_keys(self, puzzle_hash: bytes32) -> Optional[Tuple[G1Element, PrivateKey]]:
record = await self.puzzle_store.record_for_puzzle_hash(puzzle_hash)
if record is None:
raise ValueError(f"No key for this puzzlehash {puzzle_hash})")
if record.hardened:
private = master_sk_to_wallet_sk(self.private_key, record.index)
pubkey = private.get_g1()
return pubkey, private
private = master_sk_to_wallet_sk_unhardened(self.private_key, record.index)
pubkey = private.get_g1()
return pubkey, private
async def create_more_puzzle_hashes(self, from_zero: bool = False, in_transaction=False):
"""
For all wallets in the user store, generates the first few puzzle hashes so
that we can restore the wallet from only the private keys.
"""
targets = list(self.wallets.keys())
unused: Optional[uint32] = await self.puzzle_store.get_unused_derivation_path()
if unused is None:
# This handles the case where the database has entries but they have all been used
unused = await self.puzzle_store.get_last_derivation_path()
if unused is None:
# This handles the case where the database is empty
unused = uint32(0)
to_generate = self.config["initial_num_public_keys"]
for wallet_id in targets:
target_wallet = self.wallets[wallet_id]
last: Optional[uint32] = await self.puzzle_store.get_last_derivation_path_for_wallet(wallet_id)
start_index = 0
derivation_paths: List[DerivationRecord] = []
if last is not None:
start_index = last + 1
# If the key was replaced (from_zero=True), we should generate the puzzle hashes for the new key
if from_zero:
start_index = 0
for index in range(start_index, unused + to_generate):
if WalletType(target_wallet.type()) == WalletType.POOLING_WALLET:
continue
# Hardened
pubkey: G1Element = self.get_public_key(uint32(index))
puzzle: Program = target_wallet.puzzle_for_pk(bytes(pubkey))
if puzzle is None:
self.log.error(f"Unable to create puzzles with wallet {target_wallet}")
break
puzzlehash: bytes32 = puzzle.get_tree_hash()
self.log.info(f"Puzzle at index {index} wallet ID {wallet_id} puzzle hash {puzzlehash.hex()}")
derivation_paths.append(
DerivationRecord(
uint32(index), puzzlehash, pubkey, target_wallet.type(), uint32(target_wallet.id()), True
)
)
# Unhardened
pubkey_unhardened: G1Element = self.get_public_key_unhardened(uint32(index))
puzzle_unhardened: Program = target_wallet.puzzle_for_pk(bytes(pubkey_unhardened))
if puzzle_unhardened is None:
self.log.error(f"Unable to create puzzles with wallet {target_wallet}")
break
puzzlehash_unhardened: bytes32 = puzzle_unhardened.get_tree_hash()
self.log.info(
f"Puzzle at index {index} wallet ID {wallet_id} puzzle hash {puzzlehash_unhardened.hex()}"
)
derivation_paths.append(
DerivationRecord(
uint32(index),
puzzlehash_unhardened,
pubkey_unhardened,
target_wallet.type(),
uint32(target_wallet.id()),
False,
)
)
await self.puzzle_store.add_derivation_paths(derivation_paths, in_transaction)
await self.add_interested_puzzle_hashes(
[record.puzzle_hash for record in derivation_paths],
[record.wallet_id for record in derivation_paths],
in_transaction,
)
if unused > 0:
await self.puzzle_store.set_used_up_to(uint32(unused - 1), in_transaction)
async def update_wallet_puzzle_hashes(self, wallet_id):
derivation_paths: List[DerivationRecord] = []
target_wallet = self.wallets[wallet_id]
last: Optional[uint32] = await self.puzzle_store.get_last_derivation_path_for_wallet(wallet_id)
unused: Optional[uint32] = await self.puzzle_store.get_unused_derivation_path()
if unused is None:
# This handles the case where the database has entries but they have all been used
unused = await self.puzzle_store.get_last_derivation_path()
if unused is None:
# This handles the case where the database is empty
unused = uint32(0)
for index in range(unused, last):
# Since DIDs are not released yet, we can assume they only use unhardened key derivation
pubkey: G1Element = self.get_public_key_unhardened(uint32(index))
puzzle: Program = target_wallet.puzzle_for_pk(bytes(pubkey))
puzzlehash: bytes32 = puzzle.get_tree_hash()
self.log.info(f"Generating public key at index {index} puzzle hash {puzzlehash.hex()}")
derivation_paths.append(
DerivationRecord(
uint32(index),
puzzlehash,
pubkey,
target_wallet.wallet_info.type,
uint32(target_wallet.wallet_info.id),
False,
)
)
await self.puzzle_store.add_derivation_paths(derivation_paths)
async def get_unused_derivation_record(
self, wallet_id: uint32, in_transaction=False, hardened=False
) -> DerivationRecord:
"""
Creates a puzzle hash for the given wallet, and then makes more puzzle hashes
for every wallet to ensure we always have more in the database. Never reuse the
same public key more than once (for privacy).
"""
async with self.puzzle_store.lock:
# If we have no unused public keys, we will create new ones
unused: Optional[uint32] = await self.puzzle_store.get_unused_derivation_path()
if unused is None:
await self.create_more_puzzle_hashes()
# Now we must have unused public keys
unused = await self.puzzle_store.get_unused_derivation_path()
assert unused is not None
record: Optional[DerivationRecord] = await self.puzzle_store.get_derivation_record(
unused, wallet_id, hardened
)
assert record is not None
# Set this key to used so we never use it again
await self.puzzle_store.set_used_up_to(record.index, in_transaction=in_transaction)
# Create more puzzle hashes / keys
await self.create_more_puzzle_hashes(in_transaction=in_transaction)
return record
async def get_current_derivation_record_for_wallet(self, wallet_id: uint32) -> Optional[DerivationRecord]:
async with self.puzzle_store.lock:
# If we have no unused public keys, we will create new ones
current: Optional[DerivationRecord] = await self.puzzle_store.get_current_derivation_record_for_wallet(
wallet_id
)
return current
def set_callback(self, callback: Callable):
"""
Callback to be called when the state of the wallet changes.
"""
self.state_changed_callback = callback
def set_pending_callback(self, callback: Callable):
"""
Callback to be called when a new pending transaction enters the store
"""
self.pending_tx_callback = callback
def set_coin_with_puzzlehash_created_callback(self, puzzlehash: bytes32, callback: Callable):
"""
Callback to be called when a new coin is seen with the specified puzzlehash
"""
self.puzzle_hash_created_callbacks[puzzlehash] = callback
async def puzzle_hash_created(self, coin: Coin):
callback = self.puzzle_hash_created_callbacks[coin.puzzle_hash]
if callback is None:
return None
await callback(coin)
def state_changed(self, state: str, wallet_id: int = None, data_object=None):
"""
Calls the callback if it's present.
"""
if data_object is None:
data_object = {}
if self.state_changed_callback is None:
return None
self.state_changed_callback(state, wallet_id, data_object)
def tx_pending_changed(self) -> None:
"""
Notifies the wallet node that there is a new pending tx
"""
if self.pending_tx_callback is None:
return None
self.pending_tx_callback()
async def synced(self):
latest = await self.blockchain.get_peak_block()
if latest is None:
return False
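# Not synced if the peak is more than one block ahead of the height we have finished syncing to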
if latest.height - await self.blockchain.get_finished_sync_up_to() > 1:
return False
latest_timestamp = self.blockchain.get_latest_timestamp()
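# Only consider the wallet synced if the peak timestamp is within the last 10 minutes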
if latest_timestamp > int(time.time()) - 10 * 60:
return True
return False
def set_sync_mode(self, mode: bool, sync_height: uint32 = uint32(0)):
"""
Sets the sync mode. This changes the behavior of the wallet node.
"""
self.sync_mode = mode
self.sync_target = sync_height
self.state_changed("sync_changed")
async def get_confirmed_spendable_balance_for_wallet(self, wallet_id: int, unspent_records=None) -> uint128:
"""
Returns the balance amount of all coins that are spendable.
"""
spendable: Set[WalletCoinRecord] = await self.get_spendable_coins_for_wallet(wallet_id, unspent_records)
spendable_amount: uint128 = uint128(0)
for record in spendable:
spendable_amount = uint128(spendable_amount + record.coin.amount)
return spendable_amount
async def does_coin_belong_to_wallet(self, coin: Coin, wallet_id: int) -> bool:
"""
Returns true if the coin's puzzle hash belongs to the given wallet.
"""
info = await self.puzzle_store.wallet_info_for_puzzle_hash(coin.puzzle_hash)
if info is None:
return False
coin_wallet_id, wallet_type = info
if wallet_id == coin_wallet_id:
return True
return False
async def get_confirmed_balance_for_wallet(
self,
wallet_id: int,
unspent_coin_records: Optional[Set[WalletCoinRecord]] = None,
) -> uint128:
"""
Returns the confirmed balance, including coinbase rewards that are not spendable.
"""
# lock only if unspent_coin_records is None
if unspent_coin_records is None:
unspent_coin_records = await self.coin_store.get_unspent_coins_for_wallet(wallet_id)
return uint128(sum(cr.coin.amount for cr in unspent_coin_records))
async def get_unconfirmed_balance(
self, wallet_id: int, unspent_coin_records: Optional[Set[WalletCoinRecord]] = None
) -> uint128:
"""
Returns the balance, including coinbase rewards that are not spendable, and unconfirmed
transactions.
"""
# This API should change so that get_balance_from_coin_records is called for Set[WalletCoinRecord]
# and this method is called only for the unspent_coin_records==None case.
if unspent_coin_records is None:
unspent_coin_records = await self.coin_store.get_unspent_coins_for_wallet(wallet_id)
unconfirmed_tx: List[TransactionRecord] = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
all_unspent_coins: Set[Coin] = {cr.coin for cr in unspent_coin_records}
for record in unconfirmed_tx:
for addition in record.additions:
# This is change or a self-transaction
if await self.does_coin_belong_to_wallet(addition, wallet_id):
all_unspent_coins.add(addition)
for removal in record.removals:
if await self.does_coin_belong_to_wallet(removal, wallet_id) and removal in all_unspent_coins:
all_unspent_coins.remove(removal)
return uint128(sum(coin.amount for coin in all_unspent_coins))
async def unconfirmed_removals_for_wallet(self, wallet_id: int) -> Dict[bytes32, Coin]:
"""
Returns removals from transactions that have not been confirmed yet.
"""
removals: Dict[bytes32, Coin] = {}
unconfirmed_tx = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
for record in unconfirmed_tx:
for coin in record.removals:
removals[coin.name()] = coin
return removals
async def fetch_parent_and_check_for_cat(
self, peer: WSChiaConnection, coin_state: CoinState, fork_height: Optional[uint32]
) -> Tuple[Optional[uint32], Optional[WalletType]]:
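# Inspects the parent coin's spend to determine whether this coin is a CAT addressed to one of our
# inner puzzles; if so (and the asset is known or automatically_add_unknown_cats is enabled),
# creates the CAT wallet and returns its id and type.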
if self.is_pool_reward(coin_state.created_height, coin_state.coin.parent_coin_info) or self.is_farmer_reward(
coin_state.created_height, coin_state.coin.parent_coin_info
):
return None, None
response: List[CoinState] = await self.wallet_node.get_coin_state(
[coin_state.coin.parent_coin_info], fork_height, peer
)
if len(response) == 0:
self.log.warning(f"Could not find a parent coin with ID: {coin_state.coin.parent_coin_info}")
return None, None
parent_coin_state = response[0]
assert parent_coin_state.spent_height == coin_state.created_height
wallet_id = None
wallet_type = None
cs: Optional[CoinSpend] = await self.wallet_node.fetch_puzzle_solution(
peer, parent_coin_state.spent_height, parent_coin_state.coin
)
if cs is None:
return None, None
matched, curried_args = match_cat_puzzle(Program.from_bytes(bytes(cs.puzzle_reveal)))
if matched:
mod_hash, tail_hash, inner_puzzle = curried_args
inner_puzzle_hash = inner_puzzle.get_tree_hash()
self.log.info(
f"parent: {parent_coin_state.coin.name()} inner_puzzle_hash for parent is {inner_puzzle_hash}"
)
hint_list = compute_coin_hints(cs)
derivation_record = None
for hint in hint_list:
derivation_record = await self.puzzle_store.get_derivation_record_for_puzzle_hash(bytes32(hint))
if derivation_record is not None:
break
if derivation_record is None:
self.log.info(f"Received state for the coin that doesn't belong to us {coin_state}")
else:
our_inner_puzzle: Program = self.main_wallet.puzzle_for_pk(bytes(derivation_record.pubkey))
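# bytes(tail_hash) carries a one-byte serialization prefix; the [1:] and hex()[2:] slices below strip it to recover the raw 32-byte TAIL hash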
cat_puzzle = construct_cat_puzzle(CAT_MOD, bytes32(bytes(tail_hash)[1:]), our_inner_puzzle)
if cat_puzzle.get_tree_hash() != coin_state.coin.puzzle_hash:
return None, None
if bytes(tail_hash).hex()[2:] in self.default_cats or self.config.get(
"automatically_add_unknown_cats", False
):
cat_wallet = await CATWallet.create_wallet_for_cat(
self, self.main_wallet, bytes(tail_hash).hex()[2:], in_transaction=True
)
wallet_id = cat_wallet.id()
wallet_type = WalletType(cat_wallet.type())
self.state_changed("wallet_created")
return wallet_id, wallet_type
async def new_coin_state(
self, coin_states: List[CoinState], peer: WSChiaConnection, fork_height: Optional[uint32]
) -> None:
# Applies a batch of coin state updates received from a peer: adds records for newly created coins,
# marks spends, creates or confirms the matching transaction records, and handles CAT discovery
# and pool wallet (singleton) state transitions.
# Input states should already be sorted by cs_height, with reorgs at the beginning
curr_h = -1
for c_state in coin_states:
last_change_height = last_change_height_cs(c_state)
if last_change_height < curr_h:
raise ValueError("Input coin_states is not sorted properly")
curr_h = last_change_height
all_txs_per_wallet: Dict[int, List[TransactionRecord]] = {}
trade_removals = await self.trade_manager.get_coins_of_interest()
all_unconfirmed: List[TransactionRecord] = await self.tx_store.get_all_unconfirmed()
trade_coin_removed: List[CoinState] = []
for coin_state_idx, coin_state in enumerate(coin_states):
wallet_info: Optional[Tuple[uint32, WalletType]] = await self.get_wallet_id_for_puzzle_hash(
coin_state.coin.puzzle_hash
)
local_record: Optional[WalletCoinRecord] = await self.coin_store.get_coin_record(coin_state.coin.name())
self.log.debug(f"{coin_state.coin.name()}: {coin_state}")
# If we already have this coin, and it was spent and confirmed at the same heights, skip it (already processed)
if local_record is not None:
local_spent = None
if local_record.spent_block_height != 0:
local_spent = local_record.spent_block_height
if (
local_spent == coin_state.spent_height
and local_record.confirmed_block_height == coin_state.created_height
):
continue
wallet_id: Optional[uint32] = None
wallet_type: Optional[WalletType] = None
if wallet_info is not None:
wallet_id, wallet_type = wallet_info
elif local_record is not None:
wallet_id = uint32(local_record.wallet_id)
wallet_type = local_record.wallet_type
elif coin_state.created_height is not None:
wallet_id, wallet_type = await self.fetch_parent_and_check_for_cat(peer, coin_state, fork_height)
if wallet_id is None or wallet_type is None:
self.log.info(f"No wallet for coin state: {coin_state}")
continue
if wallet_id in all_txs_per_wallet:
all_txs = all_txs_per_wallet[wallet_id]
else:
all_txs = await self.tx_store.get_all_transactions_for_wallet(wallet_id)
all_txs_per_wallet[wallet_id] = all_txs
all_outgoing = [tx for tx in all_txs if "OUTGOING" in TransactionType(tx.type).name]
derivation_index = await self.puzzle_store.index_for_puzzle_hash(coin_state.coin.puzzle_hash)
if derivation_index is not None:
await self.puzzle_store.set_used_up_to(derivation_index, True)
if coin_state.created_height is None:
# TODO: implement handling for the case where this coin got reorged
# TODO: we need to potentially roll back the pool wallet here
pass
elif coin_state.created_height is not None and coin_state.spent_height is None:
await self.coin_added(coin_state.coin, coin_state.created_height, all_txs, wallet_id, wallet_type)
elif coin_state.created_height is not None and coin_state.spent_height is not None:
self.log.info(f"Coin Removed: {coin_state}")
record = await self.coin_store.get_coin_record(coin_state.coin.name())
if coin_state.coin.name() in trade_removals:
trade_coin_removed.append(coin_state)
children: Optional[List[CoinState]] = None
if record is None:
farmer_reward = False
pool_reward = False
tx_type: int
if self.is_farmer_reward(coin_state.created_height, coin_state.coin.parent_coin_info):
farmer_reward = True
tx_type = TransactionType.FEE_REWARD.value
elif self.is_pool_reward(coin_state.created_height, coin_state.coin.parent_coin_info):
pool_reward = True
tx_type = TransactionType.COINBASE_REWARD.value
else:
tx_type = TransactionType.INCOMING_TX.value
record = WalletCoinRecord(
coin_state.coin,
coin_state.created_height,
coin_state.spent_height,
True,
farmer_reward or pool_reward,
wallet_type,
wallet_id,
)
await self.coin_store.add_coin_record(record)
# Coin first received
coin_record: Optional[WalletCoinRecord] = await self.coin_store.get_coin_record(
coin_state.coin.parent_coin_info
)
if coin_record is not None and wallet_type.value == coin_record.wallet_type:
change = True
else:
change = False
if not change:
created_timestamp = await self.wallet_node.get_timestamp_for_height(coin_state.created_height)
tx_record = TransactionRecord(
confirmed_at_height=coin_state.created_height,
created_at_time=uint64(created_timestamp),
to_puzzle_hash=(await self.convert_puzzle_hash(wallet_id, coin_state.coin.puzzle_hash)),
amount=uint64(coin_state.coin.amount),
fee_amount=uint64(0),
confirmed=True,
sent=uint32(0),
spend_bundle=None,
additions=[coin_state.coin],
removals=[],
wallet_id=wallet_id,
sent_to=[],
trade_id=None,
type=uint32(tx_type),
name=bytes32(token_bytes()),
memos=[],
)
await self.tx_store.add_transaction_record(tx_record, True)
children = await self.wallet_node.fetch_children(peer, coin_state.coin.name(), fork_height)
assert children is not None
additions = [state.coin for state in children]
if len(children) > 0:
fee = 0
to_puzzle_hash = None
# Find coin that doesn't belong to us
amount = 0
for coin in additions:
derivation_record = await self.puzzle_store.get_derivation_record_for_puzzle_hash(
coin.puzzle_hash
)
if derivation_record is None:
to_puzzle_hash = coin.puzzle_hash
amount += coin.amount
if to_puzzle_hash is None:
to_puzzle_hash = additions[0].puzzle_hash
spent_timestamp = await self.wallet_node.get_timestamp_for_height(coin_state.spent_height)
# A reorg rollback re-adds reorged transactions, so a tx_record may already exist
# even though we are only adding the coin record to the db here (after the reorg)
tx_records: List[TransactionRecord] = []
for out_tx_record in all_outgoing:
for rem_coin in out_tx_record.removals:
if rem_coin.name() == coin_state.coin.name():
tx_records.append(out_tx_record)
if len(tx_records) > 0:
for tx_record in tx_records:
await self.tx_store.set_confirmed(tx_record.name, coin_state.spent_height)
else:
tx_record = TransactionRecord(
confirmed_at_height=coin_state.spent_height,
created_at_time=uint64(spent_timestamp),
to_puzzle_hash=(await self.convert_puzzle_hash(wallet_id, to_puzzle_hash)),
amount=uint64(int(amount)),
fee_amount=uint64(fee),
confirmed=True,
sent=uint32(0),
spend_bundle=None,
additions=additions,
removals=[coin_state.coin],
wallet_id=wallet_id,
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=bytes32(token_bytes()),
memos=[],
)
await self.tx_store.add_transaction_record(tx_record, True)
else:
await self.coin_store.set_spent(coin_state.coin.name(), coin_state.spent_height)
rem_tx_records: List[TransactionRecord] = []
for out_tx_record in all_outgoing:
for rem_coin in out_tx_record.removals:
if rem_coin.name() == coin_state.coin.name():
rem_tx_records.append(out_tx_record)
for tx_record in rem_tx_records:
await self.tx_store.set_confirmed(tx_record.name, coin_state.spent_height)
for unconfirmed_record in all_unconfirmed:
for rem_coin in unconfirmed_record.removals:
if rem_coin.name() == coin_state.coin.name():
self.log.info(f"Setting tx_id: {unconfirmed_record.name} to confirmed")
await self.tx_store.set_confirmed(unconfirmed_record.name, coin_state.spent_height)
if record.wallet_type == WalletType.POOLING_WALLET:
if coin_state.spent_height is not None and coin_state.coin.amount == uint64(1):
wallet = self.wallets[uint32(record.wallet_id)]
curr_coin_state: CoinState = coin_state
while curr_coin_state.spent_height is not None:
cs: CoinSpend = await self.wallet_node.fetch_puzzle_solution(
peer, curr_coin_state.spent_height, curr_coin_state.coin
)
success = await wallet.apply_state_transition(cs, curr_coin_state.spent_height)
if not success:
break
new_singleton_coin: Optional[Coin] = wallet.get_next_interesting_coin(cs)
if new_singleton_coin is None:
# No more singleton (maybe destroyed?)
break
await self.coin_added(
new_singleton_coin,
coin_state.spent_height,
[],
uint32(record.wallet_id),
record.wallet_type,
)
await self.coin_store.set_spent(curr_coin_state.coin.name(), curr_coin_state.spent_height)
await self.add_interested_coin_ids([new_singleton_coin.name()], True)
new_coin_state: List[CoinState] = await self.wallet_node.get_coin_state(
[new_singleton_coin.name()], fork_height, peer
)
assert len(new_coin_state) == 1
curr_coin_state = new_coin_state[0]
# Check if a child is a singleton launcher
if children is None:
children = await self.wallet_node.fetch_children(peer, coin_state.coin.name(), fork_height)
assert children is not None
for child in children:
if child.coin.puzzle_hash != SINGLETON_LAUNCHER_HASH:
continue
if await self.have_a_pool_wallet_with_launched_id(child.coin.name()):
continue
if child.spent_height is None:
# TODO: handle the launcher being spent in a later block
continue
launcher_spend: Optional[CoinSpend] = await self.wallet_node.fetch_puzzle_solution(
peer, coin_state.spent_height, child.coin
)
if launcher_spend is None:
continue
try:
pool_state = solution_to_pool_state(launcher_spend)
except Exception as e:
self.log.debug(f"Not a pool wallet launcher {e}")
continue
# solution_to_pool_state may return None but this may not be an error
if pool_state is None:
self.log.debug("solution_to_pool_state returned None, ignore and continue")
continue
assert child.spent_height is not None
pool_wallet = await PoolWallet.create(
self,
self.main_wallet,
child.coin.name(),
[launcher_spend],
child.spent_height,
True,
"pool_wallet",
)
launcher_spend_additions = launcher_spend.additions()
assert len(launcher_spend_additions) == 1
coin_added = launcher_spend_additions[0]
await self.coin_added(
coin_added, coin_state.spent_height, [], pool_wallet.id(), WalletType(pool_wallet.type())
)
await self.add_interested_coin_ids([coin_added.name()], True)
else:
raise RuntimeError("All cases already handled") # Logic error, all cases handled
for coin_state_removed in trade_coin_removed:
await self.trade_manager.coins_of_interest_farmed(coin_state_removed, fork_height)
async def have_a_pool_wallet_with_launched_id(self, launcher_id: bytes32) -> bool:
for wallet_id, wallet in self.wallets.items():
if (
wallet.type() == WalletType.POOLING_WALLET
and (await wallet.get_current_state()).launcher_id == launcher_id
):
self.log.warning("Already have, not recreating")
return True
return False
def is_pool_reward(self, created_height, parent_id):
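# Pool reward coins have parent ids that are a deterministic function of the reward block height;
# scan up to 30 heights at or below created_height for a match.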
for i in range(0, 30):
try_height = created_height - i
if try_height < 0:
break
calculated = pool_parent_id(try_height, self.constants.GENESIS_CHALLENGE)
if calculated == parent_id:
return True
return False
def is_farmer_reward(self, created_height, parent_id):
for i in range(0, 30):
try_height = created_height - i
if try_height < 0:
break
calculated = farmer_parent_id(try_height, self.constants.GENESIS_CHALLENGE)
if calculated == parent_id:
return True
return False
async def get_wallet_id_for_puzzle_hash(self, puzzle_hash: bytes32) -> Optional[Tuple[uint32, WalletType]]:
info = await self.puzzle_store.wallet_info_for_puzzle_hash(puzzle_hash)
if info is not None:
wallet_id, wallet_type = info
return uint32(wallet_id), wallet_type
interested_wallet_id = await self.interested_store.get_interested_puzzle_hash_wallet_id(puzzle_hash=puzzle_hash)
if interested_wallet_id is not None:
wallet_id = uint32(interested_wallet_id)
if wallet_id not in self.wallets.keys():
self.log.warning(f"Do not have wallet {wallet_id} for puzzle_hash {puzzle_hash}")
return None
wallet_type = WalletType(self.wallets[uint32(wallet_id)].type())
return uint32(wallet_id), wallet_type
return None
async def coin_added(
self,
coin: Coin,
height: uint32,
all_outgoing_transaction_records: List[TransactionRecord],
wallet_id: uint32,
wallet_type: WalletType,
) -> Optional[WalletCoinRecord]:
"""
Adds the coin to the DB and returns the wallet coin record if it gets added.
"""
existing: Optional[WalletCoinRecord] = await self.coin_store.get_coin_record(coin.name())
if existing is not None:
return None
self.log.info(f"Adding coin: {coin} at {height} wallet_id:{wallet_id}")
farmer_reward = False
pool_reward = False
if self.is_farmer_reward(height, coin.parent_coin_info):
farmer_reward = True
elif self.is_pool_reward(height, coin.parent_coin_info):
pool_reward = True
farm_reward = False
coin_record: Optional[WalletCoinRecord] = await self.coin_store.get_coin_record(coin.parent_coin_info)
if coin_record is not None and wallet_type.value == coin_record.wallet_type:
change = True
else:
change = False
if farmer_reward or pool_reward:
farm_reward = True
if pool_reward:
tx_type: int = TransactionType.COINBASE_REWARD.value
else:
tx_type = TransactionType.FEE_REWARD.value
timestamp = await self.wallet_node.get_timestamp_for_height(height)
tx_record = TransactionRecord(
confirmed_at_height=uint32(height),
created_at_time=timestamp,
to_puzzle_hash=(await self.convert_puzzle_hash(wallet_id, coin.puzzle_hash)),
amount=coin.amount,
fee_amount=uint64(0),
confirmed=True,
sent=uint32(0),
spend_bundle=None,
additions=[coin],
removals=[],
wallet_id=wallet_id,
sent_to=[],
trade_id=None,
type=uint32(tx_type),
name=coin.name(),
memos=[],
)
await self.tx_store.add_transaction_record(tx_record, True)
else:
records: List[TransactionRecord] = []
for record in all_outgoing_transaction_records:
for add_coin in record.additions:
if add_coin.name() == coin.name():
records.append(record)
if len(records) > 0:
for record in records:
if record.confirmed is False:
await self.tx_store.set_confirmed(record.name, height)
elif not change:
timestamp = await self.wallet_node.get_timestamp_for_height(height)
tx_record = TransactionRecord(
confirmed_at_height=uint32(height),
created_at_time=timestamp,
to_puzzle_hash=(await self.convert_puzzle_hash(wallet_id, coin.puzzle_hash)),
amount=coin.amount,
fee_amount=uint64(0),
confirmed=True,
sent=uint32(0),
spend_bundle=None,
additions=[coin],
removals=[],
wallet_id=wallet_id,
sent_to=[],
trade_id=None,
type=uint32(TransactionType.INCOMING_TX.value),
name=coin.name(),
memos=[],
)
if coin.amount > 0:
await self.tx_store.add_transaction_record(tx_record, True)
coin_record_1: WalletCoinRecord = WalletCoinRecord(
coin, height, uint32(0), False, farm_reward, wallet_type, wallet_id
)
await self.coin_store.add_coin_record(coin_record_1)
if wallet_type == WalletType.CAT or wallet_type == WalletType.DISTRIBUTED_ID:
wallet = self.wallets[wallet_id]
await wallet.coin_added(coin, height)
await self.create_more_puzzle_hashes(in_transaction=True)
return coin_record_1
async def add_pending_transaction(self, tx_record: TransactionRecord):
"""
Called from the wallet before a new transaction is sent to the full_node
"""
# The wallet node will use this queue to retry sending this transaction until a full node receives it
await self.tx_store.add_transaction_record(tx_record, False)
all_coins_names = []
all_coins_names.extend([coin.name() for coin in tx_record.additions])
all_coins_names.extend([coin.name() for coin in tx_record.removals])
await self.add_interested_coin_ids(all_coins_names, False)
self.tx_pending_changed()
self.state_changed("pending_transaction", tx_record.wallet_id)
async def add_transaction(self, tx_record: TransactionRecord, in_transaction=False):
"""
Called from the wallet to add a transaction that is not being sent to the full_node
"""
await self.tx_store.add_transaction_record(tx_record, in_transaction)
self.state_changed("pending_transaction", tx_record.wallet_id)
async def remove_from_queue(
self,
spendbundle_id: bytes32,
name: str,
send_status: MempoolInclusionStatus,
error: Optional[Err],
):
"""
The full node received our transaction, so there is no need to keep it in the queue anymore
"""
updated = await self.tx_store.increment_sent(spendbundle_id, name, send_status, error)
if updated:
tx: Optional[TransactionRecord] = await self.get_transaction(spendbundle_id)
if tx is not None:
self.state_changed("tx_update", tx.wallet_id, {"transaction": tx})
async def get_all_transactions(self, wallet_id: int) -> List[TransactionRecord]:
"""
Retrieves all confirmed and pending transactions
"""
records = await self.tx_store.get_all_transactions_for_wallet(wallet_id)
return records
async def get_transaction(self, tx_id: bytes32) -> Optional[TransactionRecord]:
return await self.tx_store.get_transaction_record(tx_id)
async def is_addition_relevant(self, addition: Coin):
"""
Check whether we care about a new addition (puzzle_hash). Returns true if we
control this puzzle hash.
"""
result = await self.puzzle_store.puzzle_hash_exists(addition.puzzle_hash)
return result
async def get_wallet_for_coin(self, coin_id: bytes32) -> Any:
coin_record = await self.coin_store.get_coin_record(coin_id)
if coin_record is None:
return None
wallet_id = uint32(coin_record.wallet_id)
wallet = self.wallets[wallet_id]
return wallet
async def reorg_rollback(self, height: int):
"""
Rolls back and updates the coin_store and transaction store. It's possible this height
is the tip, or even beyond the tip.
"""
await self.coin_store.rollback_to_block(height)
reorged: List[TransactionRecord] = await self.tx_store.get_transaction_above(height)
await self.tx_store.rollback_to_block(height)
for record in reorged:
if record.type in [
TransactionType.OUTGOING_TX,
TransactionType.OUTGOING_TRADE,
TransactionType.INCOMING_TRADE,
]:
await self.tx_store.tx_reorged(record, in_transaction=True)
self.tx_pending_changed()
# Removes wallets that were created from a blockchain transaction which got reorged.
remove_ids = []
for wallet_id, wallet in self.wallets.items():
if wallet.type() == WalletType.POOLING_WALLET.value:
remove: bool = await wallet.rewind(height, in_transaction=True)
if remove:
remove_ids.append(wallet_id)
for wallet_id in remove_ids:
await self.user_store.delete_wallet(wallet_id, in_transaction=True)
self.wallets.pop(wallet_id)
async def _await_closed(self) -> None:
await self.db_connection.close()
if self.weight_proof_handler is not None:
self.weight_proof_handler.cancel_weight_proof_tasks()
def unlink_db(self):
Path(self.db_path).unlink()
async def get_all_wallet_info_entries(self, wallet_type: Optional[WalletType] = None) -> List[WalletInfo]:
return await self.user_store.get_all_wallet_info_entries(wallet_type)
async def get_start_height(self):
"""
If we have a coin, use that as the starting height next time;
otherwise use the peak.
"""
return 0
async def get_wallet_for_asset_id(self, asset_id: str):
for wallet_id in self.wallets:
wallet = self.wallets[wallet_id]
if wallet.type() == WalletType.CAT:
if bytes(wallet.cat_info.limitations_program_hash).hex() == asset_id:
return wallet
return None
async def add_new_wallet(self, wallet: Any, wallet_id: int, create_puzzle_hashes=True, in_transaction=False):
self.wallets[uint32(wallet_id)] = wallet
if create_puzzle_hashes:
await self.create_more_puzzle_hashes(in_transaction=in_transaction)
self.state_changed("wallet_created")
async def get_spendable_coins_for_wallet(self, wallet_id: int, records=None) -> Set[WalletCoinRecord]:
if records is None:
records = await self.coin_store.get_unspent_coins_for_wallet(wallet_id)
# Coins that are currently part of a transaction
unconfirmed_tx: List[TransactionRecord] = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
removal_dict: Dict[bytes32, Coin] = {}
for tx in unconfirmed_tx:
for coin in tx.removals:
# TODO, "if" might not be necessary once unconfirmed tx doesn't contain coins for other wallets
if await self.does_coin_belong_to_wallet(coin, wallet_id):
removal_dict[coin.name()] = coin
# Coins that are part of the trade
offer_locked_coins: Dict[bytes32, WalletCoinRecord] = await self.trade_manager.get_locked_coins()
filtered = set()
for record in records:
if record.coin.name() in offer_locked_coins:
continue
if record.coin.name() in removal_dict:
continue
filtered.add(record)
return filtered
async def create_action(
self, name: str, wallet_id: int, wallet_type: int, callback: str, done: bool, data: str, in_transaction: bool
):
await self.action_store.create_action(name, wallet_id, wallet_type, callback, done, data, in_transaction)
self.tx_pending_changed()
async def generator_received(self, height: uint32, header_hash: bytes32, program: Program):
actions: List[WalletAction] = await self.action_store.get_all_pending_actions()
for action in actions:
data = json.loads(action.data)
action_data = data["data"]["action_data"]
if action.name == "request_generator":
stored_header_hash = bytes32(hexstr_to_bytes(action_data["header_hash"]))
stored_height = uint32(action_data["height"])
if stored_header_hash == header_hash and stored_height == height:
if action.done:
return None
wallet = self.wallets[uint32(action.wallet_id)]
callback_str = action.wallet_callback
if callback_str is not None:
callback = getattr(wallet, callback_str)
await callback(height, header_hash, program, action.id)
async def puzzle_solution_received(self, response: RespondPuzzleSolution):
unwrapped: PuzzleSolutionResponse = response.response
actions: List[WalletAction] = await self.action_store.get_all_pending_actions()
for action in actions:
data = json.loads(action.data)
action_data = data["data"]["action_data"]
if action.name == "request_puzzle_solution":
stored_coin_name = bytes32(hexstr_to_bytes(action_data["coin_name"]))
height = uint32(action_data["height"])
if stored_coin_name == unwrapped.coin_name and height == unwrapped.height:
if action.done:
return None
wallet = self.wallets[uint32(action.wallet_id)]
callback_str = action.wallet_callback
if callback_str is not None:
callback = getattr(wallet, callback_str)
await callback(unwrapped, action.id)
async def new_peak(self, peak: wallet_protocol.NewPeakWallet):
for wallet_id, wallet in self.wallets.items():
if wallet.type() == uint8(WalletType.POOLING_WALLET):
await wallet.new_peak(peak.height)
async def add_interested_puzzle_hashes(
self, puzzle_hashes: List[bytes32], wallet_ids: List[int], in_transaction: bool = False
) -> None:
for puzzle_hash, wallet_id in zip(puzzle_hashes, wallet_ids):
await self.interested_store.add_interested_puzzle_hash(puzzle_hash, wallet_id, in_transaction)
if len(puzzle_hashes) > 0:
await self.wallet_node.new_peak_queue.subscribe_to_puzzle_hashes(puzzle_hashes)
async def add_interested_coin_ids(self, coin_ids: List[bytes32], in_transaction: bool = False) -> None:
for coin_id in coin_ids:
await self.interested_store.add_interested_coin_id(coin_id, in_transaction)
if len(coin_ids) > 0:
await self.wallet_node.new_peak_queue.subscribe_to_coin_ids(coin_ids)
async def delete_trade_transactions(self, trade_id: bytes32):
txs: List[TransactionRecord] = await self.tx_store.get_transactions_by_trade_id(trade_id)
for tx in txs:
await self.tx_store.delete_transaction_record(tx.name)
async def convert_puzzle_hash(self, wallet_id: uint32, puzzle_hash: bytes32) -> bytes32:
wallet = self.wallets[wallet_id]
# This should be generic across wallets, but for right now it is only needed for CATs, so we special-case it here
if wallet.type() == WalletType.CAT.value:
return await wallet.convert_puzzle_hash(puzzle_hash)
return puzzle_hash
|
the-stack_0_13151 | from models import *
import sqlalchemy as db
from sqlalchemy.orm import *
from sqlalchemy import inspect
from sqlalchemy_schemadisplay import create_schema_graph
class DatabaseHelper(object):
# params
__session = None
__engine = None
    def __new__(cls, *args, **kwargs):
        # Simple singleton: keep one shared instance on the class. A name-mangled
        # '__instance' attribute cannot be found via hasattr(cls, '__instance'),
        # so use a single-underscore attribute instead.
        if getattr(cls, '_instance', None) is None:
            cls._instance = super(DatabaseHelper, cls).__new__(cls)
        return cls._instance
@classmethod
def get_engine(cls):
if cls.__engine is None:
cls.__engine = engine = db.create_engine("postgresql://postgres:postgres@localhost:5432/postgres")
print("Created database engine ", cls.__engine)
return cls.__engine
@classmethod
def get_session(cls):
if cls.__session is None:
cls.__session = Session(cls.get_engine())
print("Database session opened")
return cls.__session
@classmethod
def close(cls):
if cls.__session:
cls.__session.close()
print("Database session closed")
@classmethod
def __getInspector(cls):
return inspect(cls.get_engine())
@classmethod
def rollback_session(cls):
cls.get_session().rollback()
@classmethod
def getListOfTables(cls):
inspector = cls.__getInspector()
table_list = []
for table_name in inspector.get_table_names():
if table_name is None: continue
table_list.append(table_name)
return table_list
@classmethod
def getTableColumns(cls, table_name):
inspector = cls.__getInspector()
return inspector.get_columns(table_name)
@classmethod
def get_primary_keys(cls, table_name):
inspector = cls.__getInspector()
return inspector.get_primary_keys(table_name)
@classmethod
def get_foreign_keys(cls, table_name):
inspector = cls.__getInspector()
return inspector.get_foreign_keys(table_name)
@classmethod
def fill_db_object(cls, model, inflator_object):
assert (isinstance(inflator_object, object))
db_object = model.create()
for key in inflator_object.keys():
setattr(db_object, key.name, inflator_object[key])
return db_object
@classmethod
def getTableObject(cls, table_name):
if table_name == 'promoter':
return promoter
if table_name == 'ad':
return ad
if table_name == 'theme':
return theme
if table_name == 'user':
return user
if table_name == 'user_theme':
return user_theme
if table_name == 'product':
return product
if table_name == 'session':
return session |
the-stack_0_13154 | import os
import sys
import glob
import unittest
import datetime
def create_test_suite():
"""
    Collects the unit tests available in the test folder and returns them as a single
    unittest suite, ready to be executed by a test runner.
"""
testFolder = sys.path[0]
    moduleStrings = []
    # Both the 'src' layout and the flat layout resolve the test files the same way,
    # so a single glob over <testFolder>/test is sufficient.
    testFiles = glob.glob(testFolder+'/test/test_*.py')
    for testFile in testFiles:
        testFileName = testFile.split('/')[-1].replace('.py','')
        moduleStrings.append('test.'+testFileName)
print('Running Suite of Tests...',datetime.datetime.now())
suites = [unittest.defaultTestLoader.loadTestsFromName(name) for name in moduleStrings]
testSuite = unittest.TestSuite(suites)
return testSuite
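# Minimal usage sketch: build the suite and run it directly (assumes the test modules
# are importable from the current working directory).
if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(create_test_suite())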
|
the-stack_0_13155 | from distutils.version import LooseVersion
import pytest
import numpy as np
from collections import defaultdict
from itertools import combinations
from opensfm import multiview
from opensfm.synthetic_data import synthetic_examples
def pytest_configure(config):
use_legacy_numpy_printoptions()
def use_legacy_numpy_printoptions():
"""Ensure numpy use legacy print formant."""
if LooseVersion(np.__version__).version[:2] > [1, 13]:
np.set_printoptions(legacy='1.13')
@pytest.fixture(scope='module')
def scene_synthetic():
np.random.seed(42)
data = synthetic_examples.synthetic_ellipse_scene()
maximum_depth = 40
projection_noise = 1.0
gps_noise = 5.0
exifs = data.get_scene_exifs(gps_noise)
features, desc, colors, graph = data.get_tracks_data(maximum_depth,
projection_noise)
return data, exifs, features, desc, colors, graph
@pytest.fixture(scope='session')
def scene_synthetic_cube():
np.random.seed(42)
data = synthetic_examples.synthetic_cube_scene()
_, _, _, tracks_manager = data.get_tracks_data(40, 0.0)
return data.get_reconstruction(), tracks_manager
@pytest.fixture(scope='module')
def pairs_and_poses():
np.random.seed(42)
data = synthetic_examples.synthetic_cube_scene()
reconstruction = data.get_reconstruction()
scale = 0.0
features, _, _, tracks_manager = data.get_tracks_data(40, scale)
points_keys = list(reconstruction.points.keys())
pairs, poses = defaultdict(list), defaultdict(list)
for im1, im2 in tracks_manager.get_all_pairs_connectivity():
tuples = tracks_manager.get_all_common_observations(im1, im2)
f1 = [p.point for k, p, _ in tuples if k in points_keys]
f2 = [p.point for k, _, p in tuples if k in points_keys]
pairs[im1, im2].append((f1, f2))
poses[im1, im2] = reconstruction.shots[im2].pose.\
compose(reconstruction.shots[im1].pose.inverse())
camera = list(reconstruction.cameras.values())[0]
return pairs, poses, camera, features, tracks_manager, reconstruction
@pytest.fixture(scope='module')
def pairs_and_their_E(pairs_and_poses):
pairs, poses, camera, _, _, _ = pairs_and_poses
pairs = list(sorted(zip(pairs.values(), poses.values()), key=lambda x: -len(x[0])))
num_pairs = 20
indices = [np.random.randint(0, len(pairs)-1) for i in range(num_pairs)]
ret_pairs = []
for idx in indices:
pair = pairs[idx]
p1 = np.array([x for x, _ in pair[0]])
p2 = np.array([x for _, x in pair[0]])
p1 = p1.reshape(-1, p1.shape[-1])
p2 = p2.reshape(-1, p2.shape[-1])
f1 = camera.pixel_bearing_many(p1)
f2 = camera.pixel_bearing_many(p2)
pose = pair[1]
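        # Essential matrix for this relative pose: rotation composed with the
        # skew-symmetric matrix of the camera origin, normalized to unit norm.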
R = pose.get_rotation_matrix()
t_x = multiview.cross_product_matrix(pose.get_origin())
e = R.dot(t_x)
e /= np.linalg.norm(e)
ret_pairs.append((f1, f2, e, pose))
return ret_pairs
@pytest.fixture(scope='module')
def shots_and_their_points(pairs_and_poses):
_, _, _, _, tracks_manager, reconstruction = pairs_and_poses
ret_shots = []
for shot in reconstruction.shots.values():
bearings, points = [], []
for k, obs in tracks_manager.get_shot_observations(shot.id).items():
if k not in reconstruction.points:
continue
p = reconstruction.points[k]
bearings.append(shot.camera.pixel_bearing(obs.point))
points.append(p.coordinates)
ret_shots.append((shot.pose, np.array(bearings), np.array(points)))
return ret_shots
|
the-stack_0_13157 | """
Huang G. et al. "`Densely Connected Convolutional Networks
<https://arxiv.org/abs/1608.06993>`_"
"""
import tensorflow as tf
from ... import is_best_practice
from . import TFModel
from .layers import conv_block
class DenseNet(TFModel):
""" DenseNet
**Configuration**
inputs : dict
dict with 'images' and 'labels'. See :meth:`.TFModel._make_inputs`.
input_block : dict
body : dict
num_layers : list of int
number of layers in dense blocks
block : dict
parameters for dense block, including :func:`~.layers.conv_block` parameters, as well as
growth_rate : int
number of output filters in each layer (default=32)
bottleneck : bool
whether to use 1x1 convolutions in each layer (default=True)
skip : bool
whether to concatenate inputs to the output tensor
transition_layer : dict
parameters for transition layers, including :func:`~.layers.conv_block` parameters, as well as
reduction_factor : float
a multiplier for number of output filters (default=1)
"""
@classmethod
def default_config(cls):
config = TFModel.default_config()
config['common/conv/use_bias'] = False
config['input_block'].update(dict(layout='cnap', filters=16, kernel_size=7, strides=2,
pool_size=3, pool_strides=2))
config['body/block'] = dict(layout='nacd', dropout_rate=.2, growth_rate=32, bottleneck=True, skip=True)
config['body/transition_layer'] = dict(layout='nacv', kernel_size=1, strides=1,
pool_size=2, pool_strides=2, reduction_factor=1)
config['head'] = dict(layout='Vf')
config['loss'] = 'ce'
if is_best_practice('optimizer'):
config['optimizer'].update(name='Adam')
else:
lr = 1e-1
# boundaries - the number of iterations on the 150th and 225th epochs on CIFAR with batch size=64
config['decay'] = ('const', dict(boundaries=[117300, 175950], values=[lr, lr/10, lr/100]))
config['optimizer'] = ('Momentum', dict(momentum=.9))
return config
def build_config(self, names=None):
config = super().build_config(names)
if config.get('head/units') is None:
config['head/units'] = self.num_classes('targets')
if config.get('head/filters') is None:
config['head/filters'] = self.num_classes('targets')
return config
@classmethod
def body(cls, inputs, name='body', **kwargs):
""" Base layers
Parameters
----------
inputs : tf.Tensor
input tensor
name : str
scope name
Returns
-------
tf.Tensor
"""
kwargs = cls.fill_params('body', **kwargs)
num_layers, block, transition = cls.pop(['num_layers', 'block', 'transition_layer'], kwargs)
block = {**kwargs, **block}
transition = {**kwargs, **transition}
with tf.variable_scope(name):
x, inputs = inputs, None
for i, n_layers in enumerate(num_layers):
x = cls.block(x, num_layers=n_layers, name='block-%d' % i, **block)
if i < len(num_layers) - 1:
x = cls.transition_layer(x, name='transition-%d' % i, **transition)
return x
@classmethod
def block(cls, inputs, num_layers=3, name=None, **kwargs):
""" A network building block consisting of a stack of 1x1 and 3x3 convolutions.
Parameters
----------
inputs : tf.Tensor
input tensor
num_layers : int
number of conv layers
name : str
scope name
Returns
-------
tf.Tensor
"""
kwargs = cls.fill_params('body/block', **kwargs)
layout, growth_rate, bottleneck, skip = \
cls.pop(['layout', 'growth_rate', 'bottleneck', 'skip'], kwargs)
with tf.variable_scope(name):
axis = cls.channels_axis(kwargs['data_format'])
x = inputs
all_layers = []
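            # Dense connectivity: each layer receives the block input concatenated with all previous layers' outputs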
for i in range(num_layers):
if len(all_layers) > 0:
x = tf.concat([inputs] + all_layers, axis=axis, name='concat-%d' % i)
if bottleneck:
x = conv_block(x, filters=growth_rate * 4, kernel_size=1, layout=layout,
name='bottleneck-%d' % i, **kwargs)
x = conv_block(x, filters=growth_rate, kernel_size=3, layout=layout,
name='conv-%d' % i, **kwargs)
all_layers.append(x)
if skip:
all_layers = [inputs] + all_layers
x = tf.concat(all_layers, axis=axis, name='concat-%d' % num_layers)
return x
@classmethod
def transition_layer(cls, inputs, name='transition_layer', **kwargs):
""" An intermediary interconnect layer between two dense blocks
Parameters
----------
inputs : tf.Tensor
input tensor
name : str
scope name
Returns
-------
tf.Tensor
"""
kwargs = cls.fill_params('body/transition_layer', **kwargs)
reduction_factor = cls.get('reduction_factor', kwargs)
num_filters = cls.num_channels(inputs, kwargs.get('data_format'))
return conv_block(inputs, filters=num_filters * reduction_factor, name=name, **kwargs)
class DenseNet121(DenseNet):
""" The original DenseNet-121 architecture """
@classmethod
def default_config(cls):
config = DenseNet.default_config()
        config['body']['num_layers'] = [6, 12, 24, 16]
return config
class DenseNet169(DenseNet):
""" The original DenseNet-169 architecture """
@classmethod
def default_config(cls):
config = DenseNet.default_config()
        config['body']['num_layers'] = [6, 12, 32, 32]
return config
class DenseNet201(DenseNet):
""" The original DenseNet-201 architecture """
@classmethod
def default_config(cls):
config = DenseNet.default_config()
config['body']['num_layers'] = [6, 12, 48, 32]
return config
class DenseNet264(DenseNet):
""" The original DenseNet-264 architecture """
@classmethod
def default_config(cls):
config = DenseNet.default_config()
config['body']['num_layers'] = [6, 12, 64, 48]
return config
|
the-stack_0_13159 | """From RAW data filter out sentences not containing the word mouse or mice."""
import click
import codecs
import os
import src.data.dataset as dataset
@click.command()
@click.argument('read_directory', type=click.Path(dir_okay=True),
default=dataset.DEFAULT_RAW_DATA_DIRECTORY)
@click.argument('save_directory', type=click.Path(writable=True, dir_okay=True),
default=dataset.DEFAULT_INTERIM_DATA_DIRECTORY)
@click.argument('encoding', default=dataset.DEFAULT_ENCODING)
def filter_raw_data(read_directory, save_directory, encoding='utf-8'):
"""Filter out sentences not containing 'mouse' and change 'mice' to 'mouse'
READ_DIRECTORY is directory to read raw data from.
Default: <project_root>/data/raw
SAVE_DIRECTORY is directory to store filtered data.
Default: <project_root>/data/interim
ENCODING is the encoding used to save the filtered sentences
Default: 'utf-8'
Creates files 'animal.txt' and 'device.txt' in SAVE_DIRECTORY
"""
for context in ['animal', 'device']:
read_dir = os.path.join(read_directory, context)
save_dir = os.path.join(save_directory, '{}.txt'.format(context))
filenames = [filename for filename in os.listdir(read_dir) if filename.endswith('.txt')]
print(filenames)
with codecs.open(save_dir, 'w', encoding) as of:
for filename in filenames:
read_path = os.path.join(read_dir, filename)
with codecs.open(read_path, 'r', encoding) as rf:
text = rf.read()
processed_text = dataset.process_text(text)
for sentence in processed_text:
of.write(sentence)
of.write('\n')
if __name__ == '__main__':
filter_raw_data()
|
the-stack_0_13163 | # Demo: (Audio) -> (Label)
import gradio as gr
import numpy as np
from scipy.fftpack import fft
import matplotlib.pyplot as plt
from math import log2, pow
A4 = 440
C0 = A4*pow(2, -4.75)
name = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
def get_pitch(freq):
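    # Semitone count above C0; the remainder mod 12 selects the pitch-class name (C, C#, ...)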
h = round(12*log2(freq/C0))
n = h % 12
return name[n]
def main_note(audio):
rate, y = audio
if len(y.shape) == 2:
y = y.T[0]
N = len(y)
T = 1.0 / rate
x = np.linspace(0.0, N*T, N)
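    # Single-sided FFT amplitude spectrum (yf2) and its matching frequency axis (xf)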
yf = fft(y)
yf2 = 2.0/N * np.abs(yf[0:N//2])
xf = np.linspace(0.0, 1.0/(2.0*T), N//2)
volume_per_pitch = {}
total_volume = np.sum(yf2)
for freq, volume in zip(xf, yf2):
if freq == 0:
continue
pitch = get_pitch(freq)
if pitch not in volume_per_pitch:
volume_per_pitch[pitch] = 0
volume_per_pitch[pitch] += 1.0 * volume / total_volume
return volume_per_pitch
io = gr.Interface(
main_note,
"microphone",
gr.outputs.Label(num_top_classes=4),
examples=[
["audio/recording1.wav"],
["audio/cantina.wav"],
],
interpretation="default")
io.launch()
|
the-stack_0_13164 | # -*- coding: utf-8 -*-
from typing import Dict
import os
import pkg_resources
from bag.design import Module
yaml_file = pkg_resources.resource_filename(__name__, os.path.join('netlist_info', 'r2r_dac.yaml'))
# noinspection PyPep8Naming
class adc_sar_templates__r2r_dac(Module):
"""Module for library adc_sar_templates cell r2r_dac.
Fill in high level description here.
"""
def __init__(self, bag_config, parent=None, prj=None, **kwargs):
Module.__init__(self, bag_config, yaml_file, parent=parent, prj=prj, **kwargs)
def design(self, lch, pw, nw, m, num_series, num_bits, device_intent='fast'):
"""To be overridden by subclasses to design this module.
This method should fill in values for all parameters in
self.parameters. To design instances of this module, you can
call their design() method or any other ways you coded.
To modify schematic structure, call:
rename_pin()
delete_instance()
replace_instance_master()
reconnect_instance_terminal()
restore_instance()
array_instance()
"""
self.parameters['lch'] = lch
self.parameters['pw'] = pw
self.parameters['nw'] = nw
self.parameters['m'] = m
self.parameters['num_series'] = num_series
self.parameters['num_bits'] = num_bits
self.parameters['device_intent'] = device_intent
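        # R-2R ladder topology: for each bit, a 2R leg switches to VDD (I2RVDD) or VSS (I2RVSS)
        # via its EN/ENB pair, and the series R elements (IR) chain the bit taps toward the 'out' node.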
# array generation
name_list = []
term_list = []
for i in range(num_bits):
if i == 0:
term_list.append({'O': 'out',
'EN': 'EN<%d>' %(num_bits-1-i),
'ENB': 'ENB<%d>' %(num_bits-1-i)
})
else:
term_list.append({'O': 'int%d' %(num_bits-i),
'EN': 'EN<%d>' %(num_bits-1-i),
'ENB': 'ENB<%d>' %(num_bits-1-i)
})
name_list.append('I2RVDD%d' % (num_bits-1-i))
self.array_instance('I2RVDD', name_list, term_list=term_list)
for i in range(num_bits):
self.instances['I2RVDD'][i].design(lch=lch, pw=pw, nw=nw, m=m, num_series=num_series, device_intent=device_intent)
# array generation
name_list = []
term_list = []
for i in range(num_bits):
if i == 0:
term_list.append({'O': 'out',
'EN': 'ENB<%d>' %(num_bits-1-i),
'ENB': 'EN<%d>' %(num_bits-1-i)
})
else:
term_list.append({'O': 'int%d' %(num_bits-i),
'EN': 'ENB<%d>' %(num_bits-1-i),
'ENB': 'EN<%d>' %(num_bits-1-i)
})
name_list.append('I2RVSS%d' % (num_bits-1-i))
self.array_instance('I2RVSS', name_list, term_list=term_list)
for i in range(num_bits):
self.instances['I2RVSS'][i].design(lch=lch, pw=pw, nw=nw, m=m, num_series=num_series, device_intent=device_intent)
# array generation
name_list = []
term_list = []
for i in range(num_bits):
if i == 0:
term_list.append({'I': 'out',
'O': 'int%d' %(num_bits-1-i),
})
elif i == num_bits-1:
term_list.append({'I': 'int%d' %(num_bits-i),
'O': 'VSS',
})
else:
term_list.append({'I': 'int%d' %(num_bits-i),
'O': 'int%d' %(num_bits-1-i),
})
            name_list.append('IR%d' % (num_bits-1-i))
self.array_instance('IR', name_list, term_list=term_list)
for i in range(num_bits):
if i == num_bits-1:
self.instances['IR'][i].design(lch=lch, pw=pw, nw=nw, m=m, num_series=num_series, device_intent=device_intent)
else:
self.instances['IR'][i].design(lch=lch, pw=pw, nw=nw, m=m, num_series=int(num_series/2), device_intent=device_intent)
# inv array generation
name_list = []
term_list = []
term_list.append({'O': 'ENB<%d:0>' %(num_bits-1),
'I': 'SEL<%d:0>' %(num_bits-1),
})
name_list.append('IINV0<%d:0>' %(num_bits-1))
self.array_instance('IINV0', name_list, term_list=term_list)
self.instances['IINV0'][0].design(lch=lch, pw=pw, nw=nw, m=2, device_intent=device_intent)
name_list = []
term_list = []
term_list.append({'O': 'EN<%d:0>' %(num_bits-1),
'I': 'ENB<%d:0>' %(num_bits-1),
})
name_list.append('IINV1<%d:0>' %(num_bits-1))
self.array_instance('IINV1', name_list, term_list=term_list)
self.instances['IINV1'][0].design(lch=lch, pw=pw, nw=nw, m=2, device_intent=device_intent)
self.rename_pin('SEL', 'SEL<%d:0>' % (num_bits - 1))
# self.rename_pin('ZP<0>', 'ZP<%d:0>' % (num_bits - 1))
# self.rename_pin('ZMID<0>', 'ZMID<%d:0>' % (num_bits - 1))
# self.rename_pin('ZM<0>', 'ZM<%d:0>' % (num_bits - 1))
# self.rename_pin('RETO<0>', 'RETO<%d:0>' % (num_bits - 1))
#
# name_list_p = []
# name_list_n = []
# term_list = []
# for i in range(num_bits):
# for j in range(num_inv_bb):
# if j == (num_inv_bb - 1):
# term_list.append({'G': 'ZP%d' % (j) + '<%d>' % (i),
# 'D': 'ZP<%d>' % (i),
# })
# else:
# term_list.append({'G': 'ZP%d' % (j) + '<%d>' % (i),
# 'D': 'ZP%d' % (j + 1) + '<%d>' % (i),
# })
# name_list_p.append('IBUFP0%d' % (j) + '<%d>' % (i))
# name_list_n.append('IBUFN0%d' % (j) + '<%d>' % (i))
# self.array_instance('IBUFP0', name_list_p, term_list=term_list)
# self.array_instance('IBUFN0', name_list_n, term_list=term_list)
# for i in range(num_bits * num_inv_bb):
# self.instances['IBUFP0'][i].design(w=pw, l=lch, nf=m, intent=device_intent)
# self.instances['IBUFN0'][i].design(w=pw, l=lch, nf=m, intent=device_intent)
# name_list_p = []
# name_list_n = []
# term_list = []
# for i in range(num_bits):
# for j in range(num_inv_bb):
# if j == (num_inv_bb - 1):
# term_list.append({'G': 'ZMID%d' % (j) + '<%d>' % (i),
# 'D': 'ZMID<%d>' % (i),
# })
# else:
# term_list.append({'G': 'ZMID%d' % (j) + '<%d>' % (i),
# 'D': 'ZMID%d' % (j + 1) + '<%d>' % (i),
# })
# name_list_p.append('IBUFP1%d' % (j) + '<%d>' % (i))
# name_list_n.append('IBUFN1%d' % (j) + '<%d>' % (i))
# self.array_instance('IBUFP1', name_list_p, term_list=term_list)
# self.array_instance('IBUFN1', name_list_n, term_list=term_list)
# for i in range(num_bits * num_inv_bb):
# self.instances['IBUFP1'][i].design(w=pw, l=lch, nf=m, intent=device_intent)
# self.instances['IBUFN1'][i].design(w=pw, l=lch, nf=m, intent=device_intent)
# name_list_p = []
# name_list_n = []
# term_list = []
# for i in range(num_bits):
# for j in range(num_inv_bb):
# if j == (num_inv_bb - 1):
# term_list.append({'G': 'ZM%d' % (j) + '<%d>' % (i),
# 'D': 'ZM<%d>' % (i),
# })
# else:
# term_list.append({'G': 'ZM%d' % (j) + '<%d>' % (i),
# 'D': 'ZM%d' % (j + 1) + '<%d>' % (i),
# })
# name_list_p.append('IBUFP2%d' % (j) + '<%d>' % (i))
# name_list_n.append('IBUFN2%d' % (j) + '<%d>' % (i))
# self.array_instance('IBUFP2', name_list_p, term_list=term_list)
# self.array_instance('IBUFN2', name_list_n, term_list=term_list)
# for i in range(num_bits * num_inv_bb):
# self.instances['IBUFP2'][i].design(w=pw, l=lch, nf=m, intent=device_intent)
# self.instances['IBUFN2'][i].design(w=pw, l=lch, nf=m, intent=device_intent)
def get_layout_params(self, **kwargs):
"""Returns a dictionary with layout parameters.
This method computes the layout parameters used to generate implementation's
layout. Subclasses should override this method if you need to run post-extraction
layout.
Parameters
----------
kwargs :
any extra parameters you need to generate the layout parameters dictionary.
Usually you specify layout-specific parameters here, like metal layers of
input/output, customizable wire sizes, and so on.
Returns
-------
params : dict[str, any]
the layout parameters dictionary.
"""
return {}
def get_layout_pin_mapping(self):
"""Returns the layout pin mapping dictionary.
This method returns a dictionary used to rename the layout pins, in case they are different
than the schematic pins.
Returns
-------
pin_mapping : dict[str, str]
a dictionary from layout pin names to schematic pin names.
"""
return {} |
the-stack_0_13165 | # -*- coding:UTF-8 -*-
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
'''Wrapper around ADB functionality
'''
from __future__ import unicode_literals
from __future__ import print_function
import os
import re
import six
import subprocess
import sys
import threading
import time
from pkg_resources import iter_entry_points
from qt4a.androiddriver.adbclient import ADBClient
from qt4a.androiddriver.util import Singleton, Deprecated, logger, ThreadEx, TimeoutError, InstallPackageFailedError, PermissionError, is_int, encode_wrap, enforce_utf8_decode
try:
    import _strptime  # time.strptime() is not thread-safe, so import _strptime first; otherwise it raises an AttributeError: _strptime_time
except:
pass
cur_path = os.path.dirname(os.path.abspath(__file__))
def get_adb_path():
if sys.platform == 'win32':
sep = ';'
file_name = 'adb.exe'
else:
sep = ':'
file_name = 'adb'
for root in os.environ.get('PATH').split(sep):
adb_path = os.path.join(root, file_name)
        if os.path.exists(adb_path):  # prefer the adb found on the PATH environment variable
return adb_path
return os.path.join(cur_path, 'tools', 'adb', sys.platform, file_name)
adb_path = get_adb_path()
def is_adb_server_opend():
    '''Check whether the ADB server is already running
    '''
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.bind(('localhost', 5037))
sock.close()
return False
except:
return True
class EnumRootState(object):
    '''Root state of a device
    '''
    Unknown = 0  # unknown
    NonRoot = 1  # not rooted
    AdbdRoot = 2  # adbd runs with root privileges (after `adb root`)
    SuRoot = 3  # root obtained through su
class IADBBackend(object):
    '''Interface definition of an ADB backend
    '''
@staticmethod
def list_device():
        '''Enumerate the list of devices
        '''
pass
@staticmethod
def open_device(name):
        '''Open the specified device
        :param name: device name
        :type name: str
        :return: an IADBBackend instance
        '''
pass
@property
def device_host(self):
        '''Device host
        '''
pass
@property
def device_name(self):
        '''Device name
        '''
pass
def run_adb_cmd(self, cmd, *args, **kwargs):
        '''Run an adb command
        '''
pass
class LocalADBBackend(IADBBackend):
    '''ADB backend running on the local host
    '''
@staticmethod
def start():
if is_adb_server_opend():
return False
subprocess.call([adb_path, 'start-server'])
return True
@staticmethod
def close():
subprocess.call([adb_path, 'kill-server'])
@staticmethod
def list_device(device_host='127.0.0.1'):
        '''Enumerate the list of devices
        '''
if not is_adb_server_opend():
return []
result = ADBClient.get_client(device_host).call(
'devices', retry_count=3)[0]
result = result.split('\n')
device_list = []
for device in result:
if len(device) <= 1 or not '\t' in device:
continue
device_name, status = device.split('\t')
if status != 'device':
continue
device_list.append(device_name)
return device_list
@staticmethod
def open_device(name):
        '''Open the specified device
        :param name: device name
        :type name: str
        :return: an IADBBackend instance
        '''
device_host = '127.0.0.1'
if ':' in name:
pattern = re.compile(r'^\d{3,5}$')
pos = name.find(':')
hostname = name[:pos]
if not pattern.match(name[pos + 1:]):
# adb connect device
name = name[pos + 1:]
device_host = hostname
if name not in LocalADBBackend.list_device(device_host):
raise RuntimeError('Device %s not exist in host %s' %
(name, device_host))
return LocalADBBackend(device_host, name)
def __init__(self, device_host, device_name, port=5037):
self._device_host = device_host
self._device_host_port = port
self._device_name = device_name
self._adb_client = ADBClient.get_client(
self._device_host, self._device_host_port)
@property
def device_host(self):
        '''Device host
        '''
return self._device_host
@property
def device_name(self):
        '''Device name
        '''
return self._device_name
def run_adb_cmd(self, cmd, *args, **kwargs):
        '''Run an adb command
        '''
timeout = kwargs.pop('timeout')
sync = kwargs.pop('sync')
return self._adb_client.call(cmd, self._device_name, *args, sync=sync, retry_count=1, timeout=timeout)
def static_result(func):
    '''Decorator that computes a function's result once and caches it
    '''
def _wrap_func(self):
attr = '_%s_result' % func.__name__
if not hasattr(self, attr):
result = func(self)
setattr(self, attr, result)
return getattr(self, attr)
return _wrap_func
class ADB(object):
    '''Wrapper around ADB functionality
    '''
armeabi = 'armeabi'
x86 = 'x86'
    connect_timeout = 300  # timeout for connecting to a device, in seconds
def __init__(self, backend):
self._backend = backend
self._device_name = self._backend.device_name
self._root_state = EnumRootState.Unknown
        self._need_quote = None  # some phones need quotes around shell commands, others do not
        self._log_filter_thread_list = []  # ids of threads whose adb commands are not logged
        self._shell_prefix = None  # some devices prepend fixed output to shell results
        self._logcat_callbacks = []
        self._newline = None  # the newline sequence differs between phones
@property
def device_host(self):
        '''Device host name
        '''
return self._backend.device_host
@property
def device_name(self):
        '''Device name
        '''
return self._backend.device_name
def add_no_log_thread(self, thread):
        '''Add a thread to the list of threads whose commands are not logged
        '''
if not thread.ident in self._log_filter_thread_list:
self._log_filter_thread_list.append(thread.ident)
def remove_no_log_thread(self, thread):
        '''Remove a thread from the list of threads whose commands are not logged
        '''
if thread.ident in self._log_filter_thread_list:
self._log_filter_thread_list.remove(thread.ident)
def run_adb_cmd(self, cmd, *args, **kwargs):
        '''Run an adb command
        '''
        retry_count = 3  # retry at most 3 times by default
if 'retry_count' in kwargs:
retry_count = kwargs.pop('retry_count')
timeout = 20
if 'timeout' in kwargs:
timeout = kwargs.pop('timeout')
sync = True
if 'sync' in kwargs:
sync = kwargs.pop('sync')
for _ in range(retry_count):
if not threading.current_thread().ident in self._log_filter_thread_list:
logger.info('adb %s:%s %s %s' % (
self._backend.device_host, self._backend.device_name, cmd, ' '.join(args)))
time0 = time.clock()
try:
result = self._backend.run_adb_cmd(
cmd, *args, sync=sync, timeout=timeout, **kwargs)
except Exception as e:
logger.exception('Exec adb %s failed: %s' % (cmd, e))
continue
if not isinstance(result, tuple):
return result
if not threading.current_thread().ident in self._log_filter_thread_list:
logger.info('执行ADB命令耗时:%s' % (time.clock() - time0))
out, err = result
if err:
if b'error: device not found' in err:
self.run_adb_cmd('wait-for-device', retry_count=1,
timeout=self.connect_timeout) # 等待设备连接正常
return self.run_adb_cmd(cmd, *args, **kwargs)
return err
if isinstance(out, (bytes, str)):
out = out.strip()
return out
def run_shell_cmd(self, cmd_line, root=False, **kwds):
        '''Run a shell command
        :param cmd_line: command line to run
        :param root: whether to run with root privileges
        '''
if not self._newline:
result = self.run_adb_cmd('shell', 'echo "1\n2"')
if b'\r\n' in result:
self._newline = b'\r\n'
else:
self._newline = b'\n'
binary_output = False
if 'binary_output' in kwds:
binary_output = kwds.pop('binary_output')
def _handle_result(result):
if not isinstance(result, (bytes, str)):
return result
if self._newline != b'\n':
result = result.replace(self._newline, b'\n')
if binary_output:
return result
else:
result = result.decode('utf8')
if self._shell_prefix != None and self._shell_prefix > 0:
result = '\n'.join(result.split('\n')[self._shell_prefix:])
if result.startswith('WARNING: linker:'):
# 虚拟机上可能会有这种错误:WARNING: linker: libdvm.so has text relocations. This is wasting memory and is a security risk. Please fix.
lines = result.split('\n')
idx = 1
while idx < len(lines):
if not lines[idx].startswith('WARNING: linker:'):
break
idx += 1
return '\n'.join(lines[idx:]).strip()
else:
return result
if root:
need_su = True
if self._root_state == EnumRootState.Unknown:
self._root_state = self.get_root_state()
if self._root_state == EnumRootState.AdbdRoot:
need_su = False
elif self._root_state == EnumRootState.NonRoot:
raise RuntimeError('device is not rooted')
if not need_su:
return self.run_shell_cmd(cmd_line, **kwds)
if self._need_quote == None:
self._check_need_quote()
if self._need_quote:
cmd_line = 'su -c \'%s\'' % cmd_line
else:
cmd_line = 'su -c %s' % cmd_line
return _handle_result(self.run_adb_cmd('shell', '%s' % cmd_line, **kwds))
def reboot(self, _timeout=180):
        '''Reboot the phone'''
try:
self.run_adb_cmd('reboot', retry_count=1, timeout=30)
except TimeoutError:
# 使用强杀init进程方式重启手机
self.kill_process(1)
time.sleep(10) # 等待手机重启
if _timeout > 0:
self.wait_for_boot_complete(_timeout)
def wait_for_boot_complete(self, _timeout=180):
        '''Wait for the phone to finish booting'''
        # After a reboot, adbd Insecure causes adb to disconnect and reconnect. QT4A already
        # provides adb root support, so do not install adbd Insecure on test phones.
        import time
        print('Waiting for the phone to finish booting...')
self.run_adb_cmd('wait-for-device', timeout=_timeout)
boot_complete = False
attempts = 0
wait_period = 5
while not boot_complete and (attempts * wait_period) < _timeout:
output = self.run_shell_cmd(
"getprop sys.boot_completed", retry_count=1)
output = output.strip()
if output == "1":
boot_complete = True
else:
time.sleep(wait_period)
attempts += 1
if not boot_complete:
            raise RuntimeError(
                "dev.bootcomplete flag was not set within %s seconds, phone reboot failed" % _timeout)
def start_logcat(self, process_list=[], params=''):
        '''Start the logcat process
        :param process_list: process names or pids whose logs are captured; empty captures all processes
        :type process_list: list
        '''
if not hasattr(self, '_start_count'):
self._start_count = 0
self._start_count += 1
if self._start_count > 1:
return
logger.debug('[ADB] start logcat')
        self.run_shell_cmd('logcat -c ' + params)  # clear the logcat buffer
if not hasattr(self, '_log_list'):
self._log_list = []
self._logcat_running = True
self._log_pipe = self.run_shell_cmd(
'logcat -v threadtime ' + params, sync=False)
# self._logcat_thread_func(process_list)
self._logcat_thread = ThreadEx(
target=self._logcat_thread_func, args=[process_list, params])
self._logcat_thread.setDaemon(True)
self._logcat_thread.start()
self._log_filter_thread_list.append(self._logcat_thread.ident)
def stop_logcat(self):
        '''Stop logcat
        '''
if not hasattr(self, '_start_count') or self._start_count <= 0:
logger.warn('[ADB] logcat not start')
return
self._start_count -= 1
if self._start_count > 0:
return
logger.debug('[ADB] stop logcat')
self._logcat_running = False
if hasattr(self, '_log_pipe'):
            if self._log_pipe.poll() == None:  # check whether the logcat process still exists
try:
self._log_pipe.terminate()
except WindowsError as e:
logger.warn('terminate logcat process failed: %s' % e)
if hasattr(self, '_logcat_thread'):
if self._logcat_thread.ident in self._log_filter_thread_list:
self._log_filter_thread_list.remove(self._logcat_thread.ident)
else:
logger.warn('%s not in %s' % (
self._logcat_thread.ident, self._log_filter_thread_list))
def get_log(self, clear=True):
        '''Get the logs collected so far
        '''
if not hasattr(self, '_log_list'):
return []
result = self._log_list
if clear:
self._log_list = []
return result
def save_log(self, save_path):
        '''Save the collected logs to a file
        '''
if not hasattr(self, '_log_list'):
return
log_list = self.get_log()
if six.PY2:
for i in range(len(log_list)):
log = log_list[i]
if not isinstance(log, unicode):
# 先编码为unicode
for code in ['utf8', 'gbk']:
try:
log = log.decode(code)
break
except UnicodeDecodeError as e:
# logger.warn('decode with %s error: %s' % (code, e))
pass
else:
log = repr(log)
log_list[i] = log.encode('utf8') if isinstance(
log, unicode) else log
f = open(save_path, 'w')
f.write('\n'.join(log_list))
f.close()
def add_logcat_callback(self, callback):
        '''Register a logcat callback
        '''
if not callback in self._logcat_callbacks:
self._logcat_callbacks.append(callback)
def remove_logcat_callback(self, callback):
        '''Remove a logcat callback
        '''
if callback in self._logcat_callbacks:
self._logcat_callbacks.remove(callback)
def insert_logcat(self, process_name, year, month_day, timestamp, level, tag, tid, content):
self._log_list.append('[%s] [%s-%s %s] %s/%s(%s): %s' % (process_name,
year, month_day, timestamp,
level,
tag,
tid,
content))
pid = 0
pattern = re.compile(r'^(.+)\((\d+)\)$')
ret = pattern.match(process_name)
if ret:
process_name = ret.group(1)
pid = int(ret.group(2))
for callback in self._logcat_callbacks:
callback(pid, process_name, '%s-%s' % (year, month_day),
timestamp, level, tag, int(tid), content)
def _logcat_thread_func(self, process_list, params=''):
        '''Worker thread that reads and parses logcat output
        '''
import re
# pattern = re.compile(r'([A-Z])/([\w|.]+)\s*\(\s*(\d+)\):.+') #标准格式
# pattern = re.compile(r'([\d|-]+)\s+([\d|:|\.]+)\s+(\d+)\s+(\d+)\s+(\w)\s+(\S+)\s*:\s+(.+)') # [^:]
# 会过滤掉只有内容和内容为空的情况:--------- beginning of /dev/log/main not match pattern;04-16 10:09:25.170 2183 2183 D AndroidRuntime:
pattern = re.compile(
r'([\d|-]+)\s+([\d|:|\.]+)\s+(\d+)\s+(\d+)\s+(\w)\s+(.*?)\s*:\s*(.*)')
# Date Time PID TID Level Tag Content
pid_dict = {}
        filter_pid_list = []  # pids for which no matching process was found
        zygote_pid = 0  # pid of the zygote process
while self._logcat_running:
log = self._log_pipe.stdout.readline()
log = enforce_utf8_decode(log).strip()
if not log:
if self._log_pipe.poll() != None:
logger.debug('logcat进程:%s 已退出' % self._log_pipe.pid)
# 进程已退出
# TODO: 解决logcat重复问题
if not self._logcat_running:
logger.info('logcat线程停止运行')
return
self._log_pipe = self.run_shell_cmd(
'logcat -v threadtime ' + params, sync=False)
else:
continue
ret = pattern.match(log)
if not ret:
logger.info('log: %s not match pattern' % log)
continue
tag = ret.group(6).strip()
if tag in ['inject', 'dexloader', 'ActivityInspect', 'MethodHook', 'androidhook']:
logger.info(log) # 测试桩日志加入到qt4a日志中
continue
if tag in ['Web Console']:
if ret.group(7).startswith('[ClickListener]'):
logger.info(log) # WebView的控件点击信息
continue
pid = int(ret.group(3))
if pid in filter_pid_list:
continue
init_process_list = ['<pre-initialized>', 'zygote']
if not pid in pid_dict.keys():
for item in self.list_process():
if zygote_pid == 0 and item['proc_name'] == 'zygote' and item['ppid'] == 1:
# zygote父进程ID为1
zygote_pid = item['pid']
for init_process in init_process_list:
if item['pid'] in pid_dict and pid_dict[item['pid']].startswith(init_process) and not item['proc_name'].startswith(init_process):
for i in range(len(self._log_list) - 1, -1, -1):
# 修复之前记录的“<pre-initialized>”进程
pre_process_name = '[%s(%d)]' % (
init_process, item['pid'])
if not pre_process_name in self._log_list[i]:
continue
if process_list:
del_flag = True
for process in process_list:
if pid == process or item['proc_name'].startswith(process):
# 替换为真实进程名
self._log_list[i] = self._log_list[i].replace(
pre_process_name, ('[%s(%d)]' % (item['proc_name'], item['pid'])))
del_flag = False
break
if del_flag:
# 不在需要记录的进程列表中
del self._log_list[i]
else:
# 直接替换
self._log_list[i] = self._log_list[i].replace(
pre_process_name, ('[%s(%d)]' % (item['proc_name'], item['pid'])))
pid_dict[item['pid']] = item['proc_name']
# if item['proc_name'] in init_process_list and item['pid'] != zygote_pid:
# pid_dict[item['pid']] += '(%d)' % item['pid']
if not pid in pid_dict.keys():
filter_pid_list.append(pid)
continue
found = False
if not process_list:
found = True # 不指定进程列表则捕获所有进程
else:
for process in process_list:
if pid == process or (pid in pid_dict and (pid_dict[pid].startswith(process) or pid_dict[pid].startswith('<pre-initialized>')
or (pid_dict[pid].startswith('zygote') and pid != zygote_pid))): # 进程初始化中
found = True
break
if found:
import datetime
if not hasattr(self, '_year'):
self._year = datetime.date.today().year
try:
self.insert_logcat('%s(%d)' % (pid_dict.get(pid), pid), self._year, ret.group(
1), ret.group(2), ret.group(5), ret.group(6), ret.group(4), ret.group(7))
except:
logger.exception('Insert logcat failed: %r' % log)
@static_result
def get_root_state(self):
        '''Get the root state of the device
        '''
if self.is_adbd_root():
return EnumRootState.AdbdRoot
result = self.run_shell_cmd('su -c id')
if 'su: not found' in result:
return EnumRootState.NonRoot
elif 'uid=0(root)' in result:
return EnumRootState.SuRoot
return EnumRootState.NonRoot
@static_result
def is_adbd_root(self):
        '''Whether adbd is running with root privileges
        '''
result = self.run_shell_cmd('id')
logger.debug('is_adbd_root: %s' % result)
return 'uid=0(root)' in result
def is_rooted(self):
return self.get_root_state() in (EnumRootState.AdbdRoot, EnumRootState.SuRoot)
def _check_need_quote(self, timeout=20):
        '''Detect whether su -c commands need to be quoted on this device
        '''
        cmd = "su -c 'ls -l /data/data'"  # quote by default, to avoid errors on phones that mishandle commands containing spaces
        # On the Lenovo S899T the result is empty when quotes are omitted
result = self.run_shell_cmd(cmd, timeout=timeout)
if result.find('com.android.phone') >= 0:
self._need_quote = True
else:
logger.debug(result)
self._need_quote = False
# ifndef __RELEASE__
def _set_system_writable(self):
        '''Remount the system partition as writable
        '''
result = self.run_shell_cmd('mount', True)
for line in result.split('\n'):
if line.find('/system') >= 0:
block = line.split(' ')[0]
print(block)
self.run_shell_cmd('mount -o remount %s /system' % block, True)
return True
return False
# endif
def forward(self, port1, port2, type='tcp'):
        '''Set up port forwarding
        :param port1: TCP port on the PC
        :type port1: int
        :param port2: port or LocalSocket address on the phone
        :type port2: int or String
        :param type: type of the port on the phone
        :type type: String, use "localabstract" for LocalSocket addresses
        '''
while 1:
ret = self.run_adb_cmd('forward', 'tcp:%d' %
(port1), '%s:%s' % (type, port2))
if not 'cannot bind socket' in ret and not 'cannot bind to socket' in ret:
return port1
port1 += 1
def remove_forward(self, port):
        '''Remove the specified port mapping
        '''
return 'cannot remove listener' in self.run_adb_cmd('forward', '--remove', 'tcp:%d' % (port))
def create_tunnel(self, addr, type='tcp'):
        '''Create a direct connection to a socket server on the phone
        '''
sock = self.run_adb_cmd('create_tunnel', '%s:%s' % (type, addr))
if sock == '':
return None
return sock
def _push_file(self, src_path, dst_path):
        '''Push a file to the phone
        '''
result = self.run_adb_cmd('push', src_path, dst_path, timeout=None)
if 'No space left on device' in result or 'No such file or directory' in result:
            # We do not get here if the source file does not exist
            raise RuntimeError('Not enough storage space on the device')
return result
def push_file(self, src_path, dst_path, uid=None):
        '''Push a file to the phone as the specified uid
        '''
if six.PY2 and isinstance(dst_path, unicode):
dst_path = dst_path.encode('utf8')
file_size = 0
for _ in range(3):
file_size = os.path.getsize(src_path) # 防止取到的文件大小不正确
result = self._push_file(src_path, dst_path)
if file_size == 0:
logger.warn('文件大小为0')
return result
if ('%d' % file_size) in result:
try:
_, file_list = self.list_dir(dst_path)
if len(file_list) == 0:
logger.warn('push file failed: file not exist')
elif file_list[0]['size'] != file_size:
logger.warn('push file failed: file size error, expect %d, actual is %d' % (
file_size, file_list[0]['size']))
self.delete_file(dst_path)
else:
logger.debug(repr(file_list[0]))
if uid:
self.chown(dst_path, uid, uid)
return result
except RuntimeError as e:
err_msg = e.args[0]
if six.PY2 and (not isinstance(err_msg, unicode)):
err_msg = err_msg.decode('utf8')
logger.warn(err_msg)
else:
logger.warn('push file failed: %s' % result)
raise RuntimeError('Push file [%d]%s to device [%r] failed: %s' % (
file_size, src_path, self._device_name, result))
def pull_file(self, src_path, dst_path):
        '''Pull a file from the phone
        '''
result = self.run_adb_cmd('pull', src_path, dst_path, timeout=600)
if 'failed to copy' in result:
raise RuntimeError(result)
if not 'bytes in' in result:
logger.warn(repr(result))
logger.debug(self.run_shell_cmd('ls -l %s' % src_path, True))
return result
@staticmethod
def _get_package_name(apk_path):
        '''Get the package name from an apk
        '''
import zipfile
from ._axmlparser import AXMLPrinter
package_name = ''
zf = zipfile.ZipFile(apk_path, mode='r')
for i in zf.namelist():
if i == "AndroidManifest.xml":
printer = AXMLPrinter(zf.read(i))
package_name = printer.get_xml_obj().getElementsByTagName('manifest')[
0].getAttribute('package')
break
if not package_name:
            raise RuntimeError('Failed to get the package name from the apk')
return package_name
def _install_apk(self, apk_path, package_name, reinstall=False):
        '''Install an apk that has already been pushed onto the device
        '''
if self.get_sdk_version() <= 19:
timeout = 3 * 60
else:
timeout = 6 * 60 # TODO: 9100安装5.0系统后安装应用超过3分钟
cmdline = 'pm install %s %s' % ('-r' if reinstall else '', apk_path)
ret = ''
for i in range(3):
# 处理一些必然会失败的情况,如方法数超标之类的问题
try:
if not self.is_rooted():
# 通知QT4A助手开始监控应用安装
self.run_shell_cmd('am broadcast -a startInstallMonitor')
ret = self.run_shell_cmd(
cmdline, retry_count=1, timeout=timeout)
else:
proc = self.run_shell_cmd(
cmdline, True, sync=False) # 使用root权限安装
time0 = time.time()
close_popup_count = 0
while time.time() - time0 < timeout:
if proc.poll() != None:
ret = proc.communicate()[0]
break
elif time.time() - time0 > 10 and close_popup_count < 2:
# 有些系统上弹窗会出现很久,关掉弹窗可以避免超时
self.run_shell_cmd('input keyevent 4')
close_popup_count += 1
time.sleep(1)
else:
raise TimeoutError('Install package timeout')
if not b'Success' in ret:
logger.warn('install with root failed: %s' % ret)
if not b'INSTALL_' in ret.strip().split(b'\n')[-1]:
# 权限弹窗导致的安装失败
ret = self.run_as(
'system', cmdline, retry_count=1, timeout=timeout)
logger.debug(ret)
if b'Success' in ret:
return True, ret
elif i > 1 and b'INSTALL_FAILED_ALREADY_EXISTS' in ret:
# 出现至少一次超时,认为安装完成
return True, 'Success'
elif b'INSTALL_FAILED_ALREADY_EXISTS' in ret:
# 尝试覆盖安装
return self._install_apk(apk_path, package_name, True)
elif b'INSTALL_PARSE_FAILED_NO_CERTIFICATES' in ret or b'INSTALL_PARSE_FAILED_UNEXPECTED_EXCEPTION' in ret:
if i >= 2:
return False, ret
time.sleep(10)
continue
elif b'INSTALL_PARSE_FAILED_INCONSISTENT_CERTIFICATES' in ret or b'INSTALL_FAILED_DEXOPT' in ret or b'INSTALL_FAILED_UPDATE_INCOMPATIBLE' in ret:
# 必须卸载安装
if not reinstall:
return False, ret
self.uninstall_app(package_name)
return self._install_apk(apk_path, package_name, False)
elif b'INSTALL_FAILED_INSUFFICIENT_STORAGE' in ret:
# 有可能是存在/data/app-lib/packagename-1目录导致的
for i in (1, 2):
dir_path = '/data/app-lib/%s-%d' % (package_name, i)
if 'No such file or directory' in self.run_shell_cmd('ls -l %s' % dir_path, True):
continue
else:
self.delete_folder(dir_path)
break
else:
return False, ret
elif b'INSTALL_FAILED_UID_CHANGED' in ret or b'INSTALL_FAILED_INTERNAL_ERROR' in ret:
# /data/data目录下存在文件夹没有删除
dir_path = '/data/data/%s' % package_name
for _ in range(3):
# 防止删除没有成功
self.delete_folder(dir_path)
if b'No such file or directory' in self.run_shell_cmd('ls -l %s' % dir_path, True):
break
continue
elif b'INSTALL_FAILED_CANCELLED_BY_USER' in ret:
# 一般是ROM需要手动确认安装,改用system权限安装
ret = self.run_shell_cmd(
'su system %s' % cmdline, timeout=timeout)
if b'Success' in ret:
return True, ret
elif b'Error: Could not access the Package Manager' in ret:
# 设备出现问题,等待监控程序重启设备
time.sleep(30)
else:
return False, ret
except TimeoutError as e:
logger.warn('install app timeout: %r' % e)
else:
logger.warn('install app failed')
ret = self.run_shell_cmd(cmdline, timeout=timeout) # 改用非root权限安装
logger.debug(ret)
if b'Success' in ret or b'INSTALL_FAILED_ALREADY_EXISTS' in ret:
return True, 'Success'
return False, ret
def install_apk(self, apk_path, reinstall=False):
        '''Install an application
        '''
if not os.path.exists(apk_path):
            raise RuntimeError('APK: %s does not exist' % apk_path)
package_name = self._get_package_name(apk_path)
tmp_path = '/data/local/tmp/%s.apk' % package_name
self.push_file(apk_path, tmp_path)
if not reinstall:
self.uninstall_app(package_name) # 先卸载,再安装
result = self._install_apk(tmp_path, package_name, reinstall)
else:
result = self._install_apk(tmp_path, package_name, reinstall)
# logger.debug(result)
if result[0] == False:
if not b'Failure' in result[1]:
# 一般这种情况都是由于adb server意外退出导致,此时安装过程还会继续
logger.warn('install app: %r' % result[1])
timeout = 30
time0 = time.time()
while time.time() - time0 < timeout:
# 等待应用安装完成
if self.get_package_path(package_name):
break
time.sleep(1)
else:
result = self._install_apk(
tmp_path, package_name, reinstall)
else:
err_msg = result[1]
if six.PY2:
if isinstance(err_msg, unicode):
err_msg = err_msg.encode('utf8')
if isinstance(package_name, unicode):
package_name = package_name.encode('utf8')
raise InstallPackageFailedError(
'安装应用%s失败:%s' % (package_name, err_msg))
try:
self.delete_file('/data/local/tmp/*.apk')
except TimeoutError:
pass
def uninstall_app(self, pkg_name):
        '''Uninstall an application
        '''
result = ''
if not self.get_package_path(pkg_name):
return True
for _ in range(5):
try:
result = self.run_adb_cmd(
'uninstall', pkg_name, retry_count=1, timeout=30)
break
except RuntimeError:
logger.exception('uninstall %s failed' % pkg_name)
time.sleep(10)
else:
raise
logger.debug('uninstall %s result: %r' % (pkg_name, result))
if self.is_rooted():
            # Clean up cache files that the uninstall may have left behind
            cpu_abi = 'arm'
            if self.get_cpu_abi() == 'x86':
                cpu_abi = 'x86'  # TODO: support 64-bit CPUs
self.delete_file('/data/dalvik-cache/%s/data@app@%s-*' %
(cpu_abi, pkg_name))
return 'Success' in result
# ifndef __RELEASE__
@Deprecated('uninstall_app')
def uninstall_apk(self, pkg_name):
        '''Uninstall an application (deprecated, use uninstall_app)
        '''
return self.uninstall_app(pkg_name)
# endif
@encode_wrap
def get_package_path(self, pkg_name):
        '''Get the install path of an application package
        '''
        for _ in range(3):
            # Retry a few times, since occasionally the package path cannot be obtained
result = self.run_shell_cmd('pm path %s' % pkg_name)
logger.debug('get_package_path: %r' % result)
pos = result.find('package:')
if pos >= 0:
return result[pos + 8:]
time.sleep(1)
return ''
@encode_wrap
def get_package_version(self, pkg_name):
        '''Get the version of an application
        '''
result = self.run_shell_cmd('dumpsys package %s' % pkg_name)
for line in result.split('\n'):
line = line.strip()
if line.startswith('versionName='):
return line[12:]
@encode_wrap
def _build_intent_extra_string(self, extra):
        '''Build the intent extra argument string
        '''
        extra_str = ''
        for key in extra:  # extra parameters
p_type = ''
value = extra[key]
if isinstance(value, bytes):
value = value.decode('utf8')
if value in ['true', 'false']:
p_type = 'z' # EXTRA_BOOLEAN_VALUE
elif isinstance(value, int):
if is_int(value):
p_type = 'i' # EXTRA_INT_VALUE
else:
p_type = 'l' # EXTRA_LONG_VALUE
elif isinstance(value, float):
p_type = 'f' # EXTRA_FLOAT_VALUE
elif value.startswith('file://'): # EXTRA_URI_VALUE
p_type = 'u'
param = '-e%s %s %s ' % (p_type, key,
('"%s"' % value) if not p_type else value)
if p_type:
param = u'-' + param
extra_str += param
if len(extra_str) > 0:
extra_str = extra_str[:-1]
return extra_str
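    # Example (illustrative, not taken from the original code): for an input of
    # {'uin': 10001, 'enable': 'true', 'title': 'hello'} the resulting string
    # would look like '--ei uin 10001 --ez enable true -e title "hello"',
    # matching the extra-argument syntax of the `am` command.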
@encode_wrap
def start_activity(self, activity_name, action='', type='', data_uri='', extra={}, wait=True):
        '''Start an Activity. Sample output of the underlying `am start -W` command:
Warning: Activity not started, intent has been delivered to currently running top-most instance.
Status: ok
ThisTime: 0
TotalTime: 0
WaitTime: 2
Complete
'''
if activity_name:
activity_name = '-n %s' % activity_name
if action: # 指定Action
action = '-a %s ' % action
if type:
type = '-t %s ' % type
if data_uri:
data_uri = '-d "%s" ' % data_uri
extra_str = self._build_intent_extra_string(extra)
W = u''
if wait:
W = '-W' # 等待启动完成才返回
# 如果/sbin/sh指向busybox,就会返回“/sbin/sh: am: not found”错误
# 返回am找不到是因为am缺少“#!/system/bin/sh”
command = 'am start %s %s %s%s%s%s' % (
W, activity_name, action, type, data_uri, extra_str)
if command[-1] == ' ':
command = command[:-1]
result = self.run_shell_cmd(command, timeout=15, retry_count=3)
if 'Permission Denial' in result or (wait and (not 'Activity:' in result or not 'Complete' in result)):
# 使用root权限运行
if self.is_rooted():
result = self.run_shell_cmd(
command, True, timeout=15, retry_count=3)
else:
package_name = activity_name.split('/')[0].split()[1]
result = self.run_as(package_name, command,
timeout=15, retry_count=3)
# raise RuntimeError('打开Activity失败:\n%s' % result)
if 'startActivityAndWait asks to run as user -2 but is calling from user 0' in result:
command += ' --user 0'
result = self.run_as(package_name, command,
timeout=15, retry_count=3)
logger.info('start activity command:%s' % command)
if 'Permission Denial' in result or ('run as user -2 but is calling from user 0' in result) or (wait and not 'Complete' in result):
raise RuntimeError('start activity failed: %s' % result)
ret_dict = {}
for line in result.split('\n'):
if ': ' in line:
key, value = line.split(': ')
ret_dict[key] = value
if 'Error' in ret_dict:
raise RuntimeError(ret_dict['Error'])
return ret_dict
def start_service(self, service_name, extra={}):
        '''Start a service
        '''
extra_str = self._build_intent_extra_string(extra)
command = 'am startservice -n %s %s' % (service_name, extra_str)
if command[-1] == ' ':
command = command[:-1]
result = self.run_shell_cmd(command, timeout=15, retry_count=3)
if 'no service started' in result or 'java.lang.SecurityException' in result:
raise RuntimeError('start service %s failed: %s' %
(service_name, result))
def stop_service(self, service_name):
        '''Stop a service
        '''
result = self.run_shell_cmd(
'am stopservice -n %s' % service_name, timeout=15, retry_count=3)
if not 'Service stopped' in result and not 'was not running' in result:
raise RuntimeError('stop service failed: %s' % result)
def send_broadcast(self, action, extra={}):
        '''Send a broadcast
        :param action: ACTION of the broadcast
        :type action: string
        :param extra: extra parameters
        :type extra: dict
        '''
extra_str = self._build_intent_extra_string(extra)
command = 'am broadcast -a %s %s' % (action, extra_str)
result = self.run_shell_cmd(command)
if not 'Broadcast completed: result=0' in result:
raise RuntimeError('Send broadcast failed: %s' % result)
def get_property(self, prop):
        '''Read a system property
        '''
return self.run_shell_cmd('getprop %s' % prop)
def set_property(self, prop, value):
        '''Set a system property
        '''
self.run_shell_cmd('setprop %s %s' % (prop, value), self.is_rooted())
@static_result
def get_cpu_abi(self):
        '''Get the CPU architecture of the system
        '''
        ret = self.run_shell_cmd('getprop ro.product.cpu.abi')
        if not ret:
            ret = 'armeabi'  # some phones may not have this system property
return ret
@static_result
def get_device_model(self):
        '''Get the device model
        '''
model = self.run_shell_cmd('getprop ro.product.model')
brand = self.run_shell_cmd('getprop ro.product.brand')
if model.find(brand) >= 0:
return model
return '%s %s' % (brand, model)
@static_result
def get_system_version(self):
        '''Get the system version
        '''
return self.run_shell_cmd('getprop ro.build.version.release')
@static_result
def get_sdk_version(self):
        '''Get the SDK version
        '''
return int(self.run_shell_cmd('getprop ro.build.version.sdk'))
def get_uid(self, app_name):
        '''Get the uid of an application
        '''
result = self.run_shell_cmd('ls -l /data/data', True)
for line in result.split('\n'):
items = line.split(' ')
for item in items:
if not item:
continue
if item == app_name:
return items[1]
return None
def is_selinux_opened(self):
        '''Whether selinux is in enforcing mode
        '''
if self.get_sdk_version() < 18:
return False
return 'Enforcing' in self.run_shell_cmd('getenforce', True)
def close_selinux(self):
        '''Disable selinux (switch to permissive mode)
        '''
result = self.run_shell_cmd('setenforce 0', True)
if 'Permission denied' in result:
return False
return True
def chmod(self, file_path, attr):
        '''Change the attributes of a file/directory
        :param file_path: path of the file/directory
        :type file_path: string
        :param attr: attribute value to set, e.g. 777
        :type attr: int
        '''
def _parse(num):
num = str(num)
attr = ''
su_flag = ''
if len(num) == 4:
su_flag = int(num[0])
num = num[1:]
for c in num:
c = int(c)
if c & 4:
attr += 'r'
else:
attr += '-'
if c & 2:
attr += 'w'
else:
attr += '-'
if c & 1:
attr += 'x'
else:
attr += '-'
if su_flag and su_flag == 4:
attr = attr[:2] + 's' + attr[3:]
return attr
ret = self.run_shell_cmd('chmod %s %s' %
(attr, file_path), self.is_rooted())
dir_list, file_list = self.list_dir(file_path)
if len(dir_list) == 0 and len(file_list) == 1 and file_path.endswith('/' + file_list[0]['name']):
# 这是一个文件
new_attr = file_list[0]['attr']
else:
# 目录
dir_name = file_path.split('/')[-1]
parent_path = '/'.join(file_path.split('/')[:-1])
dir_list, _ = self.list_dir(parent_path)
for dir in dir_list:
if dir['name'] == dir_name:
new_attr = dir['attr']
break
if new_attr != _parse(attr):
logger.warn('chmod failed: %r(%s)' % (ret, new_attr))
return self.chmod(file_path, attr)
return new_attr
def chown(self, file_path, uid, gid):
        '''Change the owner and group of a file
        :param file_path: file path
        :type file_path: string
        :param uid: owner
        :type uid: string
        :param gid: group
        :type gid: string
        '''
self.run_shell_cmd('chown %s:%s %s' % (uid, gid, file_path), True)
def mkdir(self, dir_path, mod=None):
        '''Create a directory
        '''
cmd = 'mkdir %s' % (dir_path)
ret = self.run_shell_cmd(cmd, self.is_rooted())
# if not 'File exists' in ret:
# #加了-p参数貌似不会返回这个提示信息
try:
self.list_dir(dir_path)
except RuntimeError as e:
logger.warn('mkdir %s failed: %s(%s)' % (dir_path, ret, e))
return self.mkdir(dir_path, mod)
# 修改权限
if mod != None:
self.chmod(dir_path, mod)
def list_dir(self, dir_path):
        '''List the contents of a directory
        '''
if ' ' in dir_path:
dir_path = '"%s"' % dir_path
use_root = self.is_rooted()
if use_root and dir_path.startswith('/sdcard') or dir_path.startswith('/storage/') or dir_path.startswith('/mnt/'):
# 部分手机上发现用root权限访问/sdcard路径不一致
use_root = False
result = self.run_shell_cmd('ls -l %s' % dir_path, use_root)
if 'Permission denied' in result:
raise PermissionError(result)
if 'No such file or directory' in result:
raise RuntimeError('file or directory %s not exist' % dir_path)
if 'Not a directory' in result:
raise RuntimeError(u'%s %s' % (dir_path, result))
dir_list = []
file_list = []
def _handle_name(name):
return name.split('/')[-1]
is_toybox = self.get_sdk_version() >= 24
is_busybox = None
# busybox格式 -rwxrwxrwx 1 shell shell 13652 Jun 3 10:56 /data/local/tmp/qt4a/inject
for line in result.split('\n'):
items = line.split()
if len(items) < 6:
continue # (6, 7, 9)
if not line[0] in ('-', 'd', 'l'):
continue
is_dir = items[0][0] == 'd' # 是否是目录
is_link = items[0][0] == 'l' # 软链
if is_busybox == None:
if is_toybox:
item = items[5] # 日期字段
else:
item = items[4] # 日期字段
if is_dir or is_link:
item = items[3] # 目录和软链没有size字段
pattern = re.compile(r'\d{4}-\d{2}-\d{2}')
if pattern.match(item):
is_busybox = False
else:
is_busybox = True
if not is_busybox:
# 防止文件名称中有空格
if not is_toybox:
if not is_dir and not is_link and len(items) > 7:
items[6] = line[line.find(items[6]):].strip()
elif is_dir and len(items) > 6:
items[5] = line[line.find(items[5]):].strip()
else:
if not is_dir and not is_link and len(items) > 8:
items[7] = line[line.find(items[7]):].strip()
elif is_dir and len(items) > 7:
items[6] = line[line.find(items[6]):].strip()
attrs = items[0]
if attrs[0] == 'd':
if is_busybox:
name = _handle_name(items[8])
elif is_toybox:
name = items[7]
else:
name = items[5]
dir_list.append({'name': name, 'attr': attrs[1:]})
elif attrs[0] == '-':
if is_busybox:
name = _handle_name(items[8])
size = int(items[4])
last_modify_time = items[7]
elif is_toybox:
name = _handle_name(items[7])
size = int(items[4])
last_modify_time = time.strptime(
'%s %s:00' % (items[5], items[6]), "%Y-%m-%d %X")
else:
name = items[6]
size = int(items[3])
try:
last_modify_time = time.strptime(
'%s %s:00' % (items[4], items[5]), "%Y-%m-%d %X")
except:
# TODO: 即将删掉,调试用
logger.info('line=%s' % line)
raise
file_list.append(
{'name': name, 'attr': attrs[1:], 'size': size, 'last_modify_time': last_modify_time})
elif attrs[0] == 'l': # link
if is_busybox:
name = _handle_name(items[8])
last_modify_time = items[7]
link = items[10]
elif is_toybox:
name = items[7]
last_modify_time = time.strptime(
'%s %s:00' % (items[5], items[6]), "%Y-%m-%d %X")
link = items[9]
else:
name = items[5]
last_modify_time = time.strptime(
'%s %s:00' % (items[3], items[4]), "%Y-%m-%d %X")
link = items[7]
file_list.append(
{'name': name, 'attr': attrs[1:], 'link': link, 'last_modify_time': last_modify_time})
return dir_list, file_list
def get_sdcard_path(self):
        '''Get the sdcard path
        '''
path = '/sdcard'
while True:
dir_list, file_list = self.list_dir(path)
if len(dir_list) == 0 and len(file_list) == 1 and 'link' in file_list[0]:
# another link
path = file_list[0]['link']
else:
break
return path
def get_file_info(self, file_path):
        '''Get information about a file
        '''
return self.list_dir(file_path)[1][0]
def copy_file(self, src_path, dst_path):
        '''Copy a file on the phone
        '''
if not hasattr(self, '_has_cp'):
self._has_cp = 'not found' not in self.run_shell_cmd('cp')
if self._has_cp: # 不是所有的ROM都有cp命令
self.run_shell_cmd('cp %s %s' %
(src_path, dst_path), self.is_rooted())
else:
self.run_shell_cmd('cat %s > %s' % (
src_path, dst_path), self.is_rooted(), timeout=30) # 部分手机上发现此方法耗时较多
def delete_file(self, file_path):
        '''Delete a file on the phone
        '''
if '*' in file_path:
# 使用通配符时不能使用引号
self.run_shell_cmd('rm -f %s' % file_path, self.is_rooted())
else:
file_path = file_path.replace('"', r'\"')
self.run_shell_cmd('rm -f "%s"' % file_path, self.is_rooted())
def delete_folder(self, folder_path):
        '''Delete a directory on the phone
        '''
folder_path = folder_path.replace('"', r'\"')
self.run_shell_cmd('rm -R "%s"' % folder_path, self.is_rooted())
def run_as_by_app(self, package_name, cmdline, **kwargs):
        '''Run a command inside the app's own process
        '''
cmd_res_path = '/sdcard/qt4a_cmd_res.txt'
self.delete_file(cmd_res_path)
timeout = 30
if 'timeout' in kwargs:
timeout = kwargs['timeout']
try:
self.start_activity('%s/com.test.androidspy.inject.CmdExecuteActivity' %
package_name, extra={'cmdline': cmdline, 'timeout': timeout}, wait=False)
except Exception as e:
if 'com.test.androidspy.inject.CmdExecuteActivity} does not exist' in e.args[0]:
                raise RuntimeError(
                    'This command requires a repacked apk. Please repack it with `qt4a-manage repack-apk -p /path/to/apk`, install it, and retry!')
raise
cmd_argv_list = cmdline.split()
if len(cmd_argv_list) > 1 and cmd_argv_list[0] == 'pm' and cmd_argv_list[1] == 'clear':
logger.info('run cmd:%s,return Success' % cmdline)
time.sleep(2)
return 'Success'
time0 = time.time()
while time.time() - time0 < timeout:
try:
self.list_dir(cmd_res_path)
result = self.run_shell_cmd("cat %s" % cmd_res_path)
return result
except RuntimeError as e:
logger.info('run_as_by_app exception:%s' % e)
time.sleep(1)
raise TimeoutError("run_as_by_app timeout:%d" % timeout)
def run_as(self, package_name, cmdline, **kwargs):
        '''Run a command with the privileges of package_name
        '''
if self.is_rooted():
if self._need_quote:
cmdline = '"%s"' % cmdline
cmdline = 'su %s %s' % (package_name, cmdline)
return self.run_shell_cmd(cmdline, False, **kwargs)
if ':' in package_name:
package_name = package_name.split(':')[0] # 允许传入进程名
if '&&' in cmdline:
cmndline = 'run-as %s sh -c "%s"' % (package_name, cmdline)
else:
cmndline = 'run-as %s %s' % (package_name, cmdline)
result = self.run_shell_cmd(cmndline, **kwargs)
run_as_succ = False
if 'is unknown' in result:
logger.info('Package %s not installed' % package_name)
elif 'not debuggable' in result:
logger.info('Package %s is not debuggable' % package_name)
elif 'Could not set capabilities: Operation not permitted' in result:
logger.info('Samsung device has bug with run-as command')
elif 'run-as: exec failed for' in result:
raise RuntimeError(result)
else:
run_as_succ = True
if not run_as_succ:
try:
result = self.run_as_by_app(package_name, cmdline, **kwargs)
except RuntimeError:
logger.exception('run %s as %s by app failed' %
(cmdline, package_name))
raise PermissionError('run %s as %s failed' %
(cmdline, package_name))
return result
def is_app_process64(self, process):
        '''Whether the application process is 64-bit
        :param process: process name or process id
        :type process: string/int
        '''
process_name = ''
if isinstance(process, six.string_types) and not process.isdigit():
process_name = process
pid = self.get_pid(process)
else:
pid = int(process)
if pid <= 0:
raise ValueError('process %s not exist' % process)
if self.is_rooted():
return 'app_process64' in self.run_shell_cmd('ls -l /proc/%d/exe' % pid, True)
elif process_name:
return 'app_process64' in self.run_as(process_name, 'ls -l /proc/%d/exe' % pid)
else:
raise ValueError('Non root device must pass process name')
def _list_process(self):
        '''Get the process list
        '''
cmdline = 'ps'
if self.get_sdk_version() >= 26:
cmdline += ' -A'
result = self.run_shell_cmd(cmdline).strip()
lines = result.split('\n')
busybox = False
if lines[0].startswith('PID'):
busybox = True
result_list = []
for i in range(1, len(lines)):
lines[i] = lines[i].strip()
if not lines[i]:
continue
items = lines[i].split()
if not busybox:
if len(items) < 9:
err_msg = "ps命令返回格式错误:\n%s" % lines[i]
if len(items) == 8:
result_list.append(
{'pid': int(items[1]), 'ppid': int(items[2]), 'proc_name': items[7]})
else:
raise RuntimeError(err_msg)
else:
proc_name = items[8]
if len(proc_name) <= 1 and len(items) > 9:
proc_name = items[9]
result_list.append(
{'pid': int(items[1]), 'ppid': int(items[2]), 'proc_name': proc_name})
else:
idx = 4
cmd = items[idx]
if len(cmd) == 1:
# 有时候发现此处会有“N”
idx += 1
cmd = items[idx]
idx += 1
if cmd[0] == '{' and cmd[-1] == '}':
cmd = items[idx]
ppid = 0
if items[1].isdigit():
ppid = int(items[1]) # 有些版本中没有ppid
result_list.append(
{'pid': int(items[0]), 'ppid': ppid, 'proc_name': cmd})
return result_list
def list_process(self):
        '''Get the process list
        '''
for _ in range(3):
try:
return self._list_process()
except RuntimeError as e:
logger.warn('%s' % e)
else:
            raise RuntimeError('Failed to get the process list')
def get_pid(self, proc_name):
        '''Get the pid of a process
        '''
process_list = self.list_process()
for process in process_list:
if process['proc_name'] == proc_name:
return process['pid']
return 0
def get_process_status(self, pid):
        '''Get the status information of a process
        '''
ret = self.run_shell_cmd('cat /proc/%d/status' % pid, True)
result = {}
for line in ret.split('\n'):
if not line:
continue
if not ':' in line:
logger.warn('get_process_status line error: %r' % line)
continue
key, value = line.split(':')
result[key] = value.strip()
return result
def get_process_user(self, pid):
        '''Get the user name of a process
:param pid: process id
:type pid: int
'''
uid = -1
cmdline = 'cat /proc/%d/status' % pid
result = self.run_shell_cmd(cmdline).strip()
for line in result.split('\n'):
line = line.strip()
if line.startswith('Uid:'):
uid = int(line.split()[1])
break
if uid < 0:
raise RuntimeError('get uid of process %d failed' % pid)
if uid < 10000:
return uid
cmdline = 'cat /proc/%d/cmdline' % pid
result = self.run_shell_cmd(cmdline).strip().split('\x00')[0]
if ':' in result:
result = result.split(':')[0]
return result
def kill_process(self, proc_name_or_pid):
        '''Kill a process
        '''
kill_list = []
package_name = None
process_list = self.list_process()
for process in process_list:
if isinstance(proc_name_or_pid, six.string_types) and proc_name_or_pid in process['proc_name']:
if process['proc_name'] == proc_name_or_pid:
# 保证主进程首先被杀
kill_list.insert(0, process['pid'])
else:
kill_list.append(process['pid'])
elif process['pid'] == proc_name_or_pid:
kill_list.append(process['pid'])
if not kill_list:
return None # 没有找到对应的进程
if package_name == None and not self.is_rooted():
package_name = self.get_process_user(kill_list[0])
for i, pid in enumerate(kill_list):
kill_list[i] = 'kill -9 %d' % pid
cmd_line = ' && '.join(kill_list)
if package_name == 2000:
# shell process
result = self.run_shell_cmd(cmd_line)
elif self.is_rooted():
result = self.run_shell_cmd(cmd_line, True)
elif isinstance(package_name, six.string_types):
# package
result = self.run_as(package_name, cmd_line)
else:
raise PermissionError(
'can\'t kill uid=%s process in non-root device' % package_name)
if 'Operation not permitted' in result:
raise PermissionError('run %s failed: %s' % (cmd_line, result))
return True
def get_device_imei(self):
        '''Get the IMEI of the phone
        '''
result = self.run_shell_cmd('dumpsys iphonesubinfo', self.is_rooted())
for line in result.split('\n'):
if line.find('Device ID') >= 0:
return line.split('=')[1].strip()
        raise RuntimeError('Failed to get the IMEI: %r' % result)
def get_cpu_total_time(self):
cpu_time = 0
result = self.run_shell_cmd('cat /proc/stat')
result = result.split('\n')[0]
for item in result.split(' '):
item = item.strip()
if not item:
continue
if item == 'cpu':
continue
cpu_time += int(item)
return cpu_time
def get_process_cpu_time(self, pid):
result = self.run_shell_cmd('cat /proc/%d/stat' % pid)
result = result.split(' ')
utime = int(result[13])
stime = int(result[14])
cutime = int(result[15])
cstime = int(result[16])
return utime + stime + cutime + cstime
def get_thread_cpu_time(self, pid, tid):
result = self.run_shell_cmd('cat /proc/%d/task/%d/stat' % (pid, tid))
result = result.split(' ')
utime = int(result[13])
stime = int(result[14])
cutime = int(result[15])
cstime = int(result[16])
return utime + stime + cutime + cstime
def get_process_cpu(self, proc_name, interval=0.1):
        '''Get the CPU usage of a process and of its main thread
        '''
pid = self.get_pid(proc_name)
# print (pid)
if not pid:
return None
total_cpu1 = self.get_cpu_total_time()
process_cpu1 = self.get_process_cpu_time(pid)
thread_cpu1 = self.get_thread_cpu_time(pid, pid)
time.sleep(interval)
total_cpu2 = self.get_cpu_total_time()
process_cpu2 = self.get_process_cpu_time(pid)
thread_cpu2 = self.get_thread_cpu_time(pid, pid)
total_cpu = total_cpu2 - total_cpu1
process_cpu = process_cpu2 - process_cpu1
thread_cpu = thread_cpu2 - thread_cpu1
return process_cpu * 100 // total_cpu, thread_cpu * 100 // total_cpu
@staticmethod
def list_device():
        '''Get the device list
        '''
return LocalADBBackend.list_device()
@staticmethod
def is_local_device(device_id):
        '''Whether this is a local device
        '''
pattern = re.compile(r'([\w|\-|\.]+):(.+)')
mat = pattern.match(device_id)
if not mat or (mat.group(2).isdigit() and int(mat.group(2)) > 1024 and int(mat.group(2)) < 65536):
return True
else:
return False
@staticmethod
def open_device(name_or_backend=None):
        '''Open a device
        '''
if isinstance(name_or_backend, six.string_types):
adb_backend = LocalADBBackend.open_device(name_or_backend)
else:
adb_backend = name_or_backend
adb = ADB(adb_backend)
if adb.is_rooted() and adb.is_selinux_opened():
if not adb.close_selinux():
logger.warn('Close selinux failed')
# raise RuntimeError('关闭selinux失败,确认手机是否完美Root')
return adb
@staticmethod
def connect_device(name):
        '''Connect to a device over TCP
        '''
proc = subprocess.Popen(
[adb_path, 'connect', name], stdout=subprocess.PIPE)
result = proc.stdout.read()
if result.find('unable to connect to') >= 0:
print(result, file=sys.stderr)
return False
return True
def get_cpu_time(self):
        '''Get the total and idle CPU time slices of the phone
        '''
import re
cpu_time = 0
result = self.run_shell_cmd('cat /proc/stat')
result = result.split('\n')[0]
result, num = re.subn(r'\s+', ' ', result) # 将字符串中多个相连的空白字符合并成一个空白字符
results = result.split(' ')
if len(results) < 5:
logger.warn('无法取得CPU时间片统计,请确保手机正常链接,并已启动!')
return 0, 0
idle_time = int(results[4])
for item in results:
item = item.strip()
if not item:
continue
if item == 'cpu':
continue
cpu_time += int(item)
return cpu_time, idle_time
def get_cpu_usage(self, interval=0.5):
        '''Get the global CPU usage of the phone
        '''
total_time1, idle_time1 = self.get_cpu_time()
time.sleep(interval)
total_time2, idle_time2 = self.get_cpu_time()
total_time = total_time2 - total_time1
idle_time = idle_time2 - idle_time1
if total_time == 0:
return -1
return (total_time - idle_time) * 100 // total_time
@static_result
def is_art(self):
        '''Whether the runtime is the ART virtual machine
        '''
ret = self.get_property('persist.sys.dalvik.vm.lib')
if not ret:
ret = self.get_property('persist.sys.dalvik.vm.lib.2')
return 'libart.so' in ret
def dump_stack(self, pid_or_procname):
        '''Dump the call stacks of a process
        '''
if isinstance(pid_or_procname, six.string_types):
pid = self.get_pid(pid_or_procname)
else:
pid = pid_or_procname
anr_dir = '/data/anr'
try:
self.list_dir(anr_dir)
except RuntimeError:
self.mkdir(anr_dir)
self.chmod(anr_dir, 777)
cmd = 'kill -3 %d' % pid
self.run_shell_cmd(cmd, True)
return self.run_shell_cmd('cat %s/traces.txt' % anr_dir, True)
def get_state(self):
        '''Get the device state
        '''
return self.run_adb_cmd('get-state')
if __name__ == '__main__':
pass
|
the-stack_0_13166 | """
Introduction to conditional blocks.
A conditional block can test several different conditions
"""
import sys
try:
numero1 = int(input("Introduzca un primer número: "))
numero2 = int(input("Introduzca un segundo número: "))
except ValueError as e:
print("La conversión de al menos uno de los números no ha tenido éxito",
file=sys.stderr)
sys.exit()
# Make the comparison
if numero1 < numero2:
    print(numero1, "<", numero2)
elif numero1 > numero2:
    print(numero1, ">", numero2)
else:
    print(numero1, "==", numero2)
|
the-stack_0_13169 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python configuration for SetInput interaction."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from extensions.interactions import base
class SetInput(base.BaseInteraction):
"""Interaction for input of an unordered set of strings."""
name = 'Set Input'
description = 'Allows learners to enter an unordered set of strings.'
display_mode = base.DISPLAY_MODE_INLINE
_dependency_ids = []
answer_type = 'SetOfUnicodeString'
instructions = None
narrow_instructions = None
needs_summary = False
can_have_solution = True
show_generic_submit_button = True
# NB: There used to be a UnicodeString-typed parameter here called
# 'element_type'. This has since been removed.
_customization_arg_specs = []
_answer_visualization_specs = [{
# Table with answer counts for top N answers.
'id': 'FrequencyTable',
'options': {
'column_headers': ['Answer', 'Count'],
'title': 'Top 10 answers',
},
'calculation_id': 'Top10AnswerFrequencies',
'addressed_info_is_supported': True,
}, {
# Table with most commonly submitted elements of set.
'id': 'FrequencyTable',
'options': {
'column_headers': ['Element', 'Count'],
'title': 'Commonly submitted elements',
},
'calculation_id': 'FrequencyCommonlySubmittedElements',
# Since individual answer elements are not generally intended to be
# used as a single response to SetInput interactions, we omit the
# addressed column entirely.
'addressed_info_is_supported': False,
}]
|
the-stack_0_13170 | import json
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
vmanage_session = requests.session()
# Reserved sandbox
vmanage_node = {
"host": "10.10.20.90",
"username": "admin",
"password": "C1sco12345",
"verify": False
}
data = {
"j_username": vmanage_node["username"],
"j_password": vmanage_node["password"]
}
headers = {
"Content-Type": "application/x-www-form-urlencoded"
}
URI = f"https://{vmanage_node['host']}"
def authenticate():
response = vmanage_session.post(url=f"{URI}/j_security_check", data=data, headers=headers, verify=vmanage_node.get("verify"))
if response.status_code != 200 or "html" in response.text:
print(f"Could not authenticate: {response.status_code}\nExiting")
exit()
# Get XSRF TOKEN
response = vmanage_session.get(f"{URI}/dataservice/client/token")
if response.status_code != 200 or "html" in response.text:
print(f"Could not get token: {response.status_code}\nExiting")
exit()
else:
vmanage_session.headers["X-XSRF-TOKEN"] = response.text
# TODO: User and Group
def get_admin_user():
response = vmanage_session.get(f"{URI}/dataservice/admin/user")
if response.status_code != 200 or "html" in response.text:
print(f"Could not get admin users: {response.status_code}")
return None
return response.json()["data"]
def post_admin_user(username: str, fullname: str, group: list, password: str):
data = {
"userName": username,
"description": fullname,
"group": group,
"password": password
}
headers = {"Content-Type": "application/json"}
response = vmanage_session.post(f"{URI}/dataservice/admin/user", data=json.dumps(data), headers=headers)
if response.status_code != 200 or "html" in response.text:
print(f"Could not add user: {response.status_code}")
def delete_admin_user(username: str):
response = vmanage_session.delete(f"{URI}/dataservice/admin/user/{username}")
if response.status_code != 200 or "html" in response.text:
print(f"Could not delete user: {response.status_code}")
print(response.text)
def get_admin_user_activeSessions():
# TODO: Response code is 403, why?
response = vmanage_session.get(f"{URI}/dataservice/admin/user/activeSessions")
if response.status_code != 200 or "html" in response.text:
print(f"Could not get admin users active sessions: {response.status_code}")
return None
return response.json()["data"]
def get_admin_user_role():
response = vmanage_session.get(f"{URI}/dataservice/admin/user/role")
if response.status_code != 200 or "html" in response.text:
print(f"Could not get admin user role: {response.status_code}")
return None
return response.json()
def get_admin_usergroup():
response = vmanage_session.get(f"{URI}/dataservice/admin/usergroup")
if response.status_code != 200 or "html" in response.text:
print(f"Could not get admin usergroup: {response.status_code}")
return None
return response.json()["data"]
# TODO: Audit Log
# TODO: Tenant Management
# TODO: Tenant Backup Restore
# TODO: Utility - Logging
# TODO: Utility - Security
if __name__ == "__main__":
authenticate()
###### GET ADMIN USERS AND PRINT THEM
print("=" * 80)
print("----- USERS")
print("=" * 80)
users = get_admin_user()
for user in users:
print(f" Username: {user['userName']}")
print(f" Full name: {user.get('description')}")
# Each user can be in multiple groups
print(" Groups: ", end="")
for group in user["group"]:
print(group, end=" ")
print()
print(" " + "=" * 75)
print()
##### GET ADMIN USER ACTIVE SESSIONS
print("=" * 80)
print("----- ACTIVE USER SESSIONS")
print("=" * 80)
user_sessions = get_admin_user_activeSessions()
print(f" {user_sessions}")
print()
##### CHECK IF THIS USER SESSION HAS ADMIN PRIVILEGES
print("=" * 80)
print("----- USER ADMIN ROLE")
print("=" * 80)
user_role = get_admin_user_role()
print(f" Username: {vmanage_node['username']} is admin: {user_role['isAdmin']}")
print()
##### GET USERGROUPS AND PRINT PRIVILEGES
print("=" * 80)
print("----- USERGROUP PRIVILEGES")
print("=" * 80)
usergroups = get_admin_usergroup()
for group in usergroups:
print(" " + "=" * 75)
print(f" Group: {group['groupName']}")
print(" " + "=" * 75)
print(" Tasks")
print(" " + "=" * 70)
for task in group["tasks"]:
if task.get("enabled"):
print(f" {task['feature']}:", end=" ")
if task.get("read"):
print("r", end="")
if task.get("write"):
print("w", end="")
print()
print()
##### ADD ADMIN USER
username = "pythonuser"
print("=" * 80)
print(f"ADDING USER: {username}")
print("=" * 80)
post_admin_user(username, "Python Automation", ["netadmin"], "cisco")
##### VERIFY
print("=" * 80)
print(f"VERIFYING {username} EXISTS")
print("=" * 80)
users = get_admin_user()
found = None
for user in users:
if user["userName"] == username:
print(f" Found user: {username}")
found = True
break
##### DELETE USER
if found:
print("=" * 80)
print(f"DELETING USER: {username}")
delete_admin_user(username)
##### VERIFY
print("=" * 80)
print(f"VERIFYING {username} DOESN'T EXISTS")
print("=" * 80)
users = get_admin_user()
found = None
for user in users:
if user["userName"] == username:
print(f" Found user: {username}")
found = True
break
if not found:
print(f" {username} not found")
vmanage_session.close()
|
the-stack_0_13171 | # Advice: use repr(our_file.read()) to print the full output of tqdm
# (else '\r' will replace the previous lines and you'll see only the latest).
from __future__ import unicode_literals
import sys
import csv
import re
import os
from nose import with_setup
from nose.plugins.skip import SkipTest
from nose.tools import assert_raises
from time import sleep
from tqdm import tqdm
from tqdm import trange
from tqdm import TqdmDeprecationWarning
from tqdm._tqdm import TMonitor
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from io import IOBase # to support unicode strings
class DeprecationError(Exception):
pass
# Ensure we can use `with closing(...) as ... :` syntax
if getattr(StringIO, '__exit__', False) and \
getattr(StringIO, '__enter__', False):
def closing(arg):
return arg
else:
from contextlib import closing
try:
_range = xrange
except NameError:
_range = range
try:
_unicode = unicode
except NameError:
_unicode = str
nt_and_no_colorama = False
if os.name == 'nt':
try:
import colorama # NOQA
except ImportError:
nt_and_no_colorama = True
# Regex definitions
# List of control characters
CTRLCHR = [r'\r', r'\n', r'\x1b\[A'] # Need to escape [ for regex
# Regular expressions compilation
RE_rate = re.compile(r'(\d+\.\d+)it/s')
RE_ctrlchr = re.compile("(%s)" % '|'.join(CTRLCHR)) # Match control chars
RE_ctrlchr_excl = re.compile('|'.join(CTRLCHR)) # Match and exclude ctrl chars
RE_pos = re.compile(r'((\x1b\[A|\r|\n)+((pos\d+) bar:\s+\d+%|\s{3,6})?)') # NOQA
class DiscreteTimer(object):
'''Virtual discrete time manager, to precisely control time for tests'''
def __init__(self):
self.t = 0.0
def sleep(self, t):
'''Sleep = increment the time counter (almost no CPU used)'''
self.t += t
def time(self):
'''Get the current time'''
return self.t
class FakeSleep(object):
'''Wait until the discrete timer reached the required time'''
def __init__(self, dtimer):
self.dtimer = dtimer
def sleep(self, t):
end = t + self.dtimer.t
while(self.dtimer.t < end):
sleep(0.0000001) # sleep a bit to interrupt (instead of pass)
def cpu_timify(t, timer=None):
'''Force tqdm to use the specified timer instead of system-wide time()'''
if timer is None:
timer = DiscreteTimer()
t._time = timer.time
t._sleep = timer.sleep
t.start_t = t.last_print_t = t._time()
return timer
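# Illustrative usage of the virtual-time helpers above (not part of the original
# test suite): a test can drive tqdm on a DiscreteTimer so that rate/ETA values
# become deterministic, e.g.
#
#     timer = DiscreteTimer()
#     with closing(StringIO()) as our_file:
#         t = tqdm(total=100, file=our_file)
#         cpu_timify(t, timer)
#         timer.sleep(1)   # advances tqdm's clock without real waiting
#         t.update(10)
#         t.close()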
def pretest():
if getattr(tqdm, "_instances", False):
n = len(tqdm._instances)
if n:
tqdm._instances.clear()
raise EnvironmentError(
"{0} `tqdm` instances still in existence PRE-test".format(n))
def posttest():
if getattr(tqdm, "_instances", False):
n = len(tqdm._instances)
if n:
tqdm._instances.clear()
raise EnvironmentError(
"{0} `tqdm` instances still in existence POST-test".format(n))
class UnicodeIO(IOBase):
''' Unicode version of StringIO '''
def __init__(self, *args, **kwargs):
super(UnicodeIO, self).__init__(*args, **kwargs)
self.encoding = 'U8' # io.StringIO supports unicode, but no encoding
self.text = ''
self.cursor = 0
def __len__(self):
return len(self.text)
def seek(self, offset):
self.cursor = offset
def tell(self):
return self.cursor
def write(self, s):
self.text = self.text[:self.cursor] + s + \
self.text[self.cursor + len(s):]
self.cursor += len(s)
def read(self, n=-1):
_cur = self.cursor
self.cursor = len(self) if n < 0 \
else min(_cur + n, len(self))
return self.text[_cur:self.cursor]
def getvalue(self):
return self.text
def get_bar(all_bars, i):
""" Get a specific update from a whole bar traceback """
# Split according to any used control characters
bars_split = RE_ctrlchr_excl.split(all_bars)
bars_split = list(filter(None, bars_split)) # filter out empty splits
return bars_split[i]
def progressbar_rate(bar_str):
return float(RE_rate.search(bar_str).group(1))
def squash_ctrlchars(s):
""" Apply control characters in a string just like a terminal display """
# List of supported control codes
ctrlcodes = [r'\r', r'\n', r'\x1b\[A']
# Init variables
curline = 0 # current line in our fake terminal
lines = [''] # state of our fake terminal
# Split input string by control codes
RE_ctrl = re.compile("(%s)" % ("|".join(ctrlcodes)), flags=re.DOTALL)
s_split = RE_ctrl.split(s)
s_split = filter(None, s_split) # filter out empty splits
# For each control character or message
for nextctrl in s_split:
# If it's a control character, apply it
if nextctrl == '\r':
# Carriage return
# Go to the beginning of the line
# simplified here: we just empty the string
lines[curline] = ''
elif nextctrl == '\n':
# Newline
# Go to the next line
if curline < (len(lines) - 1):
# If already exists, just move cursor
curline += 1
else:
# Else the new line is created
lines.append('')
curline += 1
elif nextctrl == '\x1b[A':
# Move cursor up
if curline > 0:
curline -= 1
else:
raise ValueError("Cannot go up, anymore!")
# Else, it is a message, we print it on current line
else:
lines[curline] += nextctrl
return lines
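# For instance (a minimal illustration):
#   squash_ctrlchars('foo\rbar\nbaz')  ->  ['bar', 'baz']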
def test_format_interval():
""" Test time interval format """
format_interval = tqdm.format_interval
assert format_interval(60) == '01:00'
assert format_interval(6160) == '1:42:40'
assert format_interval(238113) == '66:08:33'
def test_format_meter():
""" Test statistics and progress bar formatting """
try:
unich = unichr
except NameError:
unich = chr
format_meter = tqdm.format_meter
assert format_meter(0, 1000, 13) == \
" 0%| | 0/1000 [00:13<?, ?it/s]"
assert format_meter(0, 1000, 13, ncols=68, prefix='desc: ') == \
"desc: 0%| | 0/1000 [00:13<?, ?it/s]"
assert format_meter(231, 1000, 392) == \
" 23%|" + unich(0x2588) * 2 + unich(0x258e) + \
" | 231/1000 [06:32<21:44, 1.70s/it]"
assert format_meter(10000, 1000, 13) == \
"10000it [00:13, 769.23it/s]"
assert format_meter(231, 1000, 392, ncols=56, ascii=True) == \
" 23%|" + '#' * 3 + '6' + \
" | 231/1000 [06:32<21:44, 1.70s/it]"
assert format_meter(100000, 1000, 13, unit_scale=True, unit='iB') == \
"100KiB [00:13, 7.69KiB/s]"
assert format_meter(100, 1000, 12, ncols=0, rate=7.33) == \
" 10% 100/1000 [00:12<02:02, 7.33it/s]"
# Check that bar_format correctly adapts {bar} size to the rest
assert format_meter(20, 100, 12, ncols=13, rate=8.1,
bar_format=r'{l_bar}{bar}|{n_fmt}/{total_fmt}') == \
" 20%|" + unich(0x258f) + "|20/100"
assert format_meter(20, 100, 12, ncols=14, rate=8.1,
bar_format=r'{l_bar}{bar}|{n_fmt}/{total_fmt}') == \
" 20%|" + unich(0x258d) + " |20/100"
# Check that bar_format can print only {bar} or just one side
assert format_meter(20, 100, 12, ncols=2, rate=8.1,
bar_format=r'{bar}') == \
unich(0x258d) + " "
assert format_meter(20, 100, 12, ncols=7, rate=8.1,
bar_format=r'{l_bar}{bar}') == \
" 20%|" + unich(0x258d) + " "
assert format_meter(20, 100, 12, ncols=6, rate=8.1,
bar_format=r'{bar}|test') == \
unich(0x258f) + "|test"
def test_si_format():
""" Test SI unit prefixes """
format_meter = tqdm.format_meter
assert '9.00 ' in format_meter(1, 9, 1, unit_scale=True, unit='B')
assert '99.0 ' in format_meter(1, 99, 1, unit_scale=True)
assert '999 ' in format_meter(1, 999, 1, unit_scale=True)
assert '9.99K ' in format_meter(1, 9994, 1, unit_scale=True)
assert '10.0K ' in format_meter(1, 9999, 1, unit_scale=True)
assert '99.5K ' in format_meter(1, 99499, 1, unit_scale=True)
assert '100K ' in format_meter(1, 99999, 1, unit_scale=True)
assert '1.00M ' in format_meter(1, 999999, 1, unit_scale=True)
assert '1.00G ' in format_meter(1, 999999999, 1, unit_scale=True)
assert '1.00T ' in format_meter(1, 999999999999, 1, unit_scale=True)
assert '1.00P ' in format_meter(1, 999999999999999, 1, unit_scale=True)
assert '1.00E ' in format_meter(1, 999999999999999999, 1, unit_scale=True)
assert '1.00Z ' in format_meter(1, 999999999999999999999, 1,
unit_scale=True)
assert '1.0Y ' in format_meter(1, 999999999999999999999999, 1,
unit_scale=True)
assert '10.0Y ' in format_meter(1, 9999999999999999999999999, 1,
unit_scale=True)
assert '100.0Y ' in format_meter(1, 99999999999999999999999999, 1,
unit_scale=True)
assert '1000.0Y ' in format_meter(1, 999999999999999999999999999, 1,
unit_scale=True)
@with_setup(pretest, posttest)
def test_all_defaults():
""" Test default kwargs """
with closing(UnicodeIO()) as our_file:
with tqdm(range(10), file=our_file) as progressbar:
assert len(progressbar) == 10
for _ in progressbar:
pass
# restore stdout/stderr output for `nosetest` interface
# try:
# sys.stderr.write('\x1b[A')
# except:
# pass
sys.stderr.write('\rTest default kwargs ... ')
@with_setup(pretest, posttest)
def test_iterate_over_csv_rows():
""" Test csv iterator """
# Create a test csv pseudo file
with closing(StringIO()) as test_csv_file:
writer = csv.writer(test_csv_file)
for _ in _range(3):
writer.writerow(['test'] * 3)
test_csv_file.seek(0)
# Test that nothing fails if we iterate over rows
reader = csv.DictReader(test_csv_file,
fieldnames=('row1', 'row2', 'row3'))
with closing(StringIO()) as our_file:
for _ in tqdm(reader, file=our_file):
pass
@with_setup(pretest, posttest)
def test_file_output():
""" Test output to arbitrary file-like objects """
with closing(StringIO()) as our_file:
for i in tqdm(_range(3), file=our_file):
if i == 1:
our_file.seek(0)
assert '0/3' in our_file.read()
@with_setup(pretest, posttest)
def test_leave_option():
""" Test `leave=True` always prints info about the last iteration """
with closing(StringIO()) as our_file:
for _ in tqdm(_range(3), file=our_file, leave=True):
pass
our_file.seek(0)
assert '| 3/3 ' in our_file.read()
our_file.seek(0)
assert '\n' == our_file.read()[-1] # not '\r'
with closing(StringIO()) as our_file2:
for _ in tqdm(_range(3), file=our_file2, leave=False):
pass
our_file2.seek(0)
assert '| 3/3 ' not in our_file2.read()
@with_setup(pretest, posttest)
def test_trange():
""" Test trange """
with closing(StringIO()) as our_file:
for _ in trange(3, file=our_file, leave=True):
pass
our_file.seek(0)
assert '| 3/3 ' in our_file.read()
with closing(StringIO()) as our_file2:
for _ in trange(3, file=our_file2, leave=False):
pass
our_file2.seek(0)
assert '| 3/3 ' not in our_file2.read()
@with_setup(pretest, posttest)
def test_min_interval():
""" Test mininterval """
with closing(StringIO()) as our_file:
for _ in tqdm(_range(3), file=our_file, mininterval=1e-10):
pass
our_file.seek(0)
assert " 0%| | 0/3 [00:00<" in our_file.read()
@with_setup(pretest, posttest)
def test_max_interval():
""" Test maxinterval """
total = 100
bigstep = 10
smallstep = 5
# Test without maxinterval
timer = DiscreteTimer()
with closing(StringIO()) as our_file:
with closing(StringIO()) as our_file2:
# with maxinterval but higher than loop sleep time
t = tqdm(total=total, file=our_file, miniters=None, mininterval=0,
smoothing=1, maxinterval=1e-2)
cpu_timify(t, timer)
# without maxinterval
t2 = tqdm(total=total, file=our_file2, miniters=None,
mininterval=0, smoothing=1, maxinterval=None)
cpu_timify(t2, timer)
assert t.dynamic_miniters
assert t2.dynamic_miniters
# Increase 10 iterations at once
t.update(bigstep)
t2.update(bigstep)
# The next iterations should not trigger maxinterval (step 10)
for _ in _range(4):
t.update(smallstep)
t2.update(smallstep)
timer.sleep(1e-5)
t.close() # because PyPy doesn't gc immediately
t2.close() # as above
our_file2.seek(0)
assert "25%" not in our_file2.read()
our_file.seek(0)
assert "25%" not in our_file.read()
# Test with maxinterval effect
timer = DiscreteTimer()
with closing(StringIO()) as our_file:
with tqdm(total=total, file=our_file, miniters=None, mininterval=0,
smoothing=1, maxinterval=1e-4) as t:
cpu_timify(t, timer)
# Increase 10 iterations at once
t.update(bigstep)
# The next iterations should trigger maxinterval (step 5)
for _ in _range(4):
t.update(smallstep)
timer.sleep(1e-2)
our_file.seek(0)
assert "25%" in our_file.read()
# Test iteration based tqdm with maxinterval effect
timer = DiscreteTimer()
with closing(StringIO()) as our_file:
with tqdm(_range(total), file=our_file, miniters=None,
mininterval=1e-5, smoothing=1, maxinterval=1e-4) as t2:
cpu_timify(t2, timer)
for i in t2:
if i >= (bigstep - 1) and \
((i - (bigstep - 1)) % smallstep) == 0:
timer.sleep(1e-2)
if i >= 3 * bigstep:
break
our_file.seek(0)
assert "15%" in our_file.read()
# Test different behavior with and without mininterval
timer = DiscreteTimer()
total = 1000
mininterval = 0.1
maxinterval = 10
with closing(StringIO()) as our_file:
with tqdm(total=total, file=our_file, miniters=None, smoothing=1,
mininterval=mininterval, maxinterval=maxinterval) as tm1:
with tqdm(total=total, file=our_file, miniters=None, smoothing=1,
mininterval=0, maxinterval=maxinterval) as tm2:
cpu_timify(tm1, timer)
cpu_timify(tm2, timer)
# Fast iterations, check if dynamic_miniters triggers
timer.sleep(mininterval) # to force update for t1
tm1.update(total/2)
tm2.update(total/2)
assert int(tm1.miniters) == tm2.miniters == total/2
# Slow iterations, check different miniters if mininterval
timer.sleep(maxinterval*2)
tm1.update(total/2)
tm2.update(total/2)
res = [tm1.miniters, tm2.miniters]
assert res == [
(total/2)*mininterval/(maxinterval*2),
(total/2)*maxinterval/(maxinterval*2)
]
# Same with iterable based tqdm
    timer1 = DiscreteTimer()  # need two timers, one per bar, because zip doesn't work here
timer2 = DiscreteTimer()
total = 100
mininterval = 0.1
maxinterval = 10
with closing(StringIO()) as our_file:
t1 = tqdm(_range(total), file=our_file, miniters=None, smoothing=1,
mininterval=mininterval, maxinterval=maxinterval)
t2 = tqdm(_range(total), file=our_file, miniters=None, smoothing=1,
mininterval=0, maxinterval=maxinterval)
cpu_timify(t1, timer1)
cpu_timify(t2, timer2)
for i in t1:
if i == ((total/2)-2):
timer1.sleep(mininterval)
if i == (total-1):
timer1.sleep(maxinterval*2)
for i in t2:
if i == ((total/2)-2):
timer2.sleep(mininterval)
if i == (total-1):
timer2.sleep(maxinterval*2)
assert t1.miniters == 0.255
assert t2.miniters == 0.5
t1.close()
t2.close()
@with_setup(pretest, posttest)
def test_min_iters():
""" Test miniters """
with closing(StringIO()) as our_file:
for _ in tqdm(_range(3), file=our_file, leave=True, miniters=4):
our_file.write('blank\n')
our_file.seek(0)
assert '\nblank\nblank\n' in our_file.read()
with closing(StringIO()) as our_file:
for _ in tqdm(_range(3), file=our_file, leave=True, miniters=1):
our_file.write('blank\n')
our_file.seek(0)
# assume automatic mininterval = 0 means intermediate output
assert '| 3/3 ' in our_file.read()
@with_setup(pretest, posttest)
def test_dynamic_min_iters():
""" Test purely dynamic miniters (and manual updates and __del__) """
with closing(StringIO()) as our_file:
total = 10
t = tqdm(total=total, file=our_file, miniters=None, mininterval=0,
smoothing=1)
t.update()
# Increase 3 iterations
t.update(3)
# The next two iterations should be skipped because of dynamic_miniters
t.update()
t.update()
# The third iteration should be displayed
t.update()
our_file.seek(0)
out = our_file.read()
assert t.dynamic_miniters
t.__del__() # simulate immediate del gc
assert ' 0%| | 0/10 [00:00<' in out
assert '40%' in out
assert '50%' not in out
assert '60%' not in out
assert '70%' in out
# Check with smoothing=0, miniters should be set to max update seen so far
with closing(StringIO()) as our_file:
total = 10
t = tqdm(total=total, file=our_file, miniters=None, mininterval=0,
smoothing=0)
t.update()
t.update(2)
t.update(5) # this should be stored as miniters
t.update(1)
our_file.seek(0)
out = our_file.read()
assert t.dynamic_miniters and not t.smoothing
assert t.miniters == 5
t.close()
# Check iterable based tqdm
with closing(StringIO()) as our_file:
t = tqdm(_range(10), file=our_file, miniters=None, mininterval=None,
smoothing=0.5)
for _ in t:
pass
assert t.dynamic_miniters
# No smoothing
with closing(StringIO()) as our_file:
t = tqdm(_range(10), file=our_file, miniters=None, mininterval=None,
smoothing=0)
for _ in t:
pass
assert t.dynamic_miniters
# No dynamic_miniters (miniters is fixed manually)
with closing(StringIO()) as our_file:
t = tqdm(_range(10), file=our_file, miniters=1, mininterval=None)
for _ in t:
pass
assert not t.dynamic_miniters
@with_setup(pretest, posttest)
def test_big_min_interval():
""" Test large mininterval """
with closing(StringIO()) as our_file:
for _ in tqdm(_range(2), file=our_file, mininterval=1E10):
pass
our_file.seek(0)
assert '50%' not in our_file.read()
with closing(StringIO()) as our_file:
with tqdm(_range(2), file=our_file, mininterval=1E10) as t:
t.update()
t.update()
our_file.seek(0)
assert '50%' not in our_file.read()
@with_setup(pretest, posttest)
def test_smoothed_dynamic_min_iters():
""" Test smoothed dynamic miniters """
timer = DiscreteTimer()
with closing(StringIO()) as our_file:
with tqdm(total=100, file=our_file, miniters=None, mininterval=0,
smoothing=0.5, maxinterval=0) as t:
cpu_timify(t, timer)
# Increase 10 iterations at once
t.update(10)
# The next iterations should be partially skipped
for _ in _range(2):
t.update(4)
for _ in _range(20):
t.update()
our_file.seek(0)
out = our_file.read()
assert t.dynamic_miniters
assert ' 0%| | 0/100 [00:00<' in out
assert '10%' in out
assert '14%' not in out
assert '18%' in out
assert '20%' not in out
assert '25%' in out
assert '30%' not in out
assert '32%' in out
@with_setup(pretest, posttest)
def test_smoothed_dynamic_min_iters_with_min_interval():
""" Test smoothed dynamic miniters with mininterval """
timer = DiscreteTimer()
# In this test, `miniters` should gradually decline
total = 100
with closing(StringIO()) as our_file:
# Test manual updating tqdm
with tqdm(total=total, file=our_file, miniters=None, mininterval=1e-3,
smoothing=1, maxinterval=0) as t:
cpu_timify(t, timer)
t.update(10)
timer.sleep(1e-2)
for _ in _range(4):
t.update()
timer.sleep(1e-2)
our_file.seek(0)
out = our_file.read()
assert t.dynamic_miniters
with closing(StringIO()) as our_file:
# Test iteration-based tqdm
with tqdm(_range(total), file=our_file, miniters=None,
mininterval=0.01, smoothing=1, maxinterval=0) as t2:
cpu_timify(t2, timer)
for i in t2:
if i >= 10:
timer.sleep(0.1)
if i >= 14:
break
our_file.seek(0)
out2 = our_file.read()
assert t.dynamic_miniters
assert ' 0%| | 0/100 [00:00<' in out
assert '11%' in out and '11%' in out2
# assert '12%' not in out and '12%' in out2
assert '13%' in out and '13%' in out2
assert '14%' in out and '14%' in out2
@with_setup(pretest, posttest)
def test_disable():
""" Test disable """
with closing(StringIO()) as our_file:
for _ in tqdm(_range(3), file=our_file, disable=True):
pass
our_file.seek(0)
assert our_file.read() == ''
with closing(StringIO()) as our_file:
progressbar = tqdm(total=3, file=our_file, miniters=1, disable=True)
progressbar.update(3)
progressbar.close()
our_file.seek(0)
assert our_file.read() == ''
@with_setup(pretest, posttest)
def test_unit():
""" Test SI unit prefix """
with closing(StringIO()) as our_file:
for _ in tqdm(_range(3), file=our_file, miniters=1, unit="bytes"):
pass
our_file.seek(0)
assert 'bytes/s' in our_file.read()
@with_setup(pretest, posttest)
def test_ascii():
""" Test ascii/unicode bar """
# Test ascii autodetection
with closing(StringIO()) as our_file:
with tqdm(total=10, file=our_file, ascii=None) as t:
assert t.ascii # TODO: this may fail in the future
# Test ascii bar
with closing(StringIO()) as our_file:
for _ in tqdm(_range(3), total=15, file=our_file, miniters=1,
mininterval=0, ascii=True):
pass
our_file.seek(0)
res = our_file.read().strip("\r").split("\r")
assert '7%|6' in res[1]
assert '13%|#3' in res[2]
assert '20%|##' in res[3]
# Test unicode bar
with closing(UnicodeIO()) as our_file:
with tqdm(total=15, file=our_file, ascii=False, mininterval=0) as t:
for _ in _range(3):
t.update()
our_file.seek(0)
res = our_file.read().strip("\r").split("\r")
assert "7%|\u258b" in res[1]
assert "13%|\u2588\u258e" in res[2]
assert "20%|\u2588\u2588" in res[3]
@with_setup(pretest, posttest)
def test_update():
""" Test manual creation and updates """
with closing(StringIO()) as our_file:
with tqdm(total=2, file=our_file, miniters=1, mininterval=0) \
as progressbar:
assert len(progressbar) == 2
progressbar.update(2)
our_file.seek(0)
assert '| 2/2' in our_file.read()
progressbar.desc = 'dynamically notify of 4 increments in total'
progressbar.total = 4
try:
progressbar.update(-10)
except ValueError as e:
if str(e) != "n (-10) cannot be negative":
raise
progressbar.update() # should default to +1
else:
raise ValueError("Should not support negative updates")
our_file.seek(0)
res = our_file.read()
assert '| 3/4 ' in res
assert 'dynamically notify of 4 increments in total' in res
@with_setup(pretest, posttest)
def test_close():
""" Test manual creation and closure and n_instances """
# With `leave` option
with closing(StringIO()) as our_file:
progressbar = tqdm(total=3, file=our_file, miniters=10)
progressbar.update(3)
assert '| 3/3 ' not in our_file.getvalue() # Should be blank
assert len(tqdm._instances) == 1
progressbar.close()
assert len(tqdm._instances) == 0
assert '| 3/3 ' in our_file.getvalue()
# Without `leave` option
with closing(StringIO()) as our_file:
progressbar = tqdm(total=3, file=our_file, miniters=10, leave=False)
progressbar.update(3)
progressbar.close()
assert '| 3/3 ' not in our_file.getvalue() # Should be blank
# With all updates
with closing(StringIO()) as our_file:
assert len(tqdm._instances) == 0
with tqdm(total=3, file=our_file, miniters=0, mininterval=0,
leave=True) as progressbar:
assert len(tqdm._instances) == 1
progressbar.update(3)
res = our_file.getvalue()
            assert '| 3/3 ' in res  # Should display the completed bar
# close() called
assert len(tqdm._instances) == 0
our_file.seek(0)
exres = res + '\n'
if exres != our_file.read():
our_file.seek(0)
raise AssertionError("\nExpected:\n{0}\nGot:{1}\n".format(
exres, our_file.read()))
# Closing after the output stream has closed
with closing(StringIO()) as our_file:
t = tqdm(total=2, file=our_file)
t.update()
t.update()
t.close()
@with_setup(pretest, posttest)
def test_smoothing():
""" Test exponential weighted average smoothing """
timer = DiscreteTimer()
# -- Test disabling smoothing
with closing(StringIO()) as our_file:
with tqdm(_range(3), file=our_file, smoothing=None, leave=True) as t:
cpu_timify(t, timer)
for _ in t:
pass
our_file.seek(0)
assert '| 3/3 ' in our_file.read()
# -- Test smoothing
# Compile the regex to find the rate
# 1st case: no smoothing (only use average)
with closing(StringIO()) as our_file2:
with closing(StringIO()) as our_file:
t = tqdm(_range(3), file=our_file2, smoothing=None, leave=True,
miniters=1, mininterval=0)
cpu_timify(t, timer)
with tqdm(_range(3), file=our_file, smoothing=None, leave=True,
miniters=1, mininterval=0) as t2:
cpu_timify(t2, timer)
for i in t2:
# Sleep more for first iteration and
# see how quickly rate is updated
if i == 0:
timer.sleep(0.01)
else:
# Need to sleep in all iterations
# to calculate smoothed rate
# (else delta_t is 0!)
timer.sleep(0.001)
t.update()
n_old = len(tqdm._instances)
t.close()
assert len(tqdm._instances) == n_old - 1
# Get result for iter-based bar
a = progressbar_rate(get_bar(our_file.getvalue(), 3))
# Get result for manually updated bar
a2 = progressbar_rate(get_bar(our_file2.getvalue(), 3))
# 2nd case: use max smoothing (= instant rate)
with closing(StringIO()) as our_file2:
with closing(StringIO()) as our_file:
t = tqdm(_range(3), file=our_file2, smoothing=1, leave=True,
miniters=1, mininterval=0)
cpu_timify(t, timer)
with tqdm(_range(3), file=our_file, smoothing=1, leave=True,
miniters=1, mininterval=0) as t2:
cpu_timify(t2, timer)
for i in t2:
if i == 0:
timer.sleep(0.01)
else:
timer.sleep(0.001)
t.update()
t.close()
# Get result for iter-based bar
b = progressbar_rate(get_bar(our_file.getvalue(), 3))
# Get result for manually updated bar
b2 = progressbar_rate(get_bar(our_file2.getvalue(), 3))
# 3rd case: use medium smoothing
with closing(StringIO()) as our_file2:
with closing(StringIO()) as our_file:
t = tqdm(_range(3), file=our_file2, smoothing=0.5, leave=True,
miniters=1, mininterval=0)
cpu_timify(t, timer)
t2 = tqdm(_range(3), file=our_file, smoothing=0.5, leave=True,
miniters=1, mininterval=0)
cpu_timify(t2, timer)
for i in t2:
if i == 0:
timer.sleep(0.01)
else:
timer.sleep(0.001)
t.update()
t2.close()
t.close()
# Get result for iter-based bar
c = progressbar_rate(get_bar(our_file.getvalue(), 3))
# Get result for manually updated bar
c2 = progressbar_rate(get_bar(our_file2.getvalue(), 3))
# Check that medium smoothing's rate is between no and max smoothing rates
assert a < c < b
assert a2 < c2 < b2
@with_setup(pretest, posttest)
def test_deprecated_nested():
""" Test nested progress bars """
if nt_and_no_colorama:
raise SkipTest
# TODO: test degradation on windows without colorama?
# Artificially test nested loop printing
# Without leave
our_file = StringIO()
try:
tqdm(total=2, file=our_file, nested=True)
except TqdmDeprecationWarning:
if """`nested` is deprecated and automated.\
Use position instead for manual control.""" not in our_file.getvalue():
raise
else:
raise DeprecationError("Should not allow nested kwarg")
@with_setup(pretest, posttest)
def test_bar_format():
""" Test custom bar formatting """
with closing(StringIO()) as our_file:
bar_format = r'{l_bar}{bar}|{n_fmt}/{total_fmt}-{n}/{total}{percentage}{rate}{rate_fmt}{elapsed}{remaining}' # NOQA
for _ in trange(2, file=our_file, leave=True, bar_format=bar_format):
pass
out = our_file.getvalue()
assert "\r 0%| |0/2-0/20.0None?it/s00:00?\r" in out
# Test unicode string auto conversion
with closing(StringIO()) as our_file:
bar_format = r'hello world'
with tqdm(ascii=False, bar_format=bar_format, file=our_file) as t:
assert isinstance(t.bar_format, _unicode)
@with_setup(pretest, posttest)
def test_unpause():
""" Test unpause """
timer = DiscreteTimer()
with closing(StringIO()) as our_file:
t = trange(10, file=our_file, leave=True, mininterval=0)
cpu_timify(t, timer)
timer.sleep(0.01)
t.update()
timer.sleep(0.01)
t.update()
timer.sleep(0.1) # longer wait time
t.unpause()
timer.sleep(0.01)
t.update()
timer.sleep(0.01)
t.update()
t.close()
r_before = progressbar_rate(get_bar(our_file.getvalue(), 2))
r_after = progressbar_rate(get_bar(our_file.getvalue(), 3))
assert r_before == r_after
@with_setup(pretest, posttest)
def test_position():
""" Test positioned progress bars """
if nt_and_no_colorama:
raise SkipTest
# Artificially test nested loop printing
# Without leave
our_file = StringIO()
t = tqdm(total=2, file=our_file, miniters=1, mininterval=0,
maxinterval=0, desc='pos2 bar', leave=False, position=2)
t.update()
t.close()
our_file.seek(0)
out = our_file.read()
res = [m[0] for m in RE_pos.findall(out)]
exres = ['\n\n\rpos2 bar: 0%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 50%',
'\x1b[A\x1b[A\n\n\r ',
'\x1b[A\x1b[A']
if res != exres:
raise AssertionError("\nExpected:\n{0}\nGot:\n{1}\nRaw:\n{2}\n".format(
str(exres), str(res), str([out])))
# Test iteration-based tqdm positioning
our_file = StringIO()
for _ in trange(2, file=our_file, miniters=1, mininterval=0,
maxinterval=0, desc='pos0 bar', position=0):
for _ in trange(2, file=our_file, miniters=1, mininterval=0,
maxinterval=0, desc='pos1 bar', position=1):
for _ in trange(2, file=our_file, miniters=1, mininterval=0,
maxinterval=0, desc='pos2 bar', position=2):
pass
our_file.seek(0)
out = our_file.read()
res = [m[0] for m in RE_pos.findall(out)]
exres = ['\rpos0 bar: 0%',
'\n\rpos1 bar: 0%',
'\x1b[A\n\n\rpos2 bar: 0%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 50%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 100%',
'\x1b[A\x1b[A\n\n\x1b[A\x1b[A\n\rpos1 bar: 50%',
'\x1b[A\n\n\rpos2 bar: 0%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 50%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 100%',
'\x1b[A\x1b[A\n\n\x1b[A\x1b[A\n\rpos1 bar: 100%',
'\x1b[A\n\x1b[A\rpos0 bar: 50%',
'\n\rpos1 bar: 0%',
'\x1b[A\n\n\rpos2 bar: 0%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 50%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 100%',
'\x1b[A\x1b[A\n\n\x1b[A\x1b[A\n\rpos1 bar: 50%',
'\x1b[A\n\n\rpos2 bar: 0%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 50%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 100%',
'\x1b[A\x1b[A\n\n\x1b[A\x1b[A\n\rpos1 bar: 100%',
'\x1b[A\n\x1b[A\rpos0 bar: 100%',
'\n']
if res != exres:
raise AssertionError("\nExpected:\n{0}\nGot:\n{1}\nRaw:\n{2}\n".format(
str(exres), str(res), str([out])))
# Test manual tqdm positioning
our_file = StringIO()
t1 = tqdm(total=2, file=our_file, miniters=1, mininterval=0,
maxinterval=0, desc='pos0 bar', position=0)
t2 = tqdm(total=2, file=our_file, miniters=1, mininterval=0,
maxinterval=0, desc='pos1 bar', position=1)
t3 = tqdm(total=2, file=our_file, miniters=1, mininterval=0,
maxinterval=0, desc='pos2 bar', position=2)
for _ in _range(2):
t1.update()
t3.update()
t2.update()
our_file.seek(0)
out = our_file.read()
res = [m[0] for m in RE_pos.findall(out)]
exres = ['\rpos0 bar: 0%',
'\n\rpos1 bar: 0%',
'\x1b[A\n\n\rpos2 bar: 0%',
'\x1b[A\x1b[A\rpos0 bar: 50%',
'\n\n\rpos2 bar: 50%',
'\x1b[A\x1b[A\n\rpos1 bar: 50%',
'\x1b[A\rpos0 bar: 100%',
'\n\n\rpos2 bar: 100%',
'\x1b[A\x1b[A\n\rpos1 bar: 100%',
'\x1b[A']
if res != exres:
raise AssertionError("\nExpected:\n{0}\nGot:\n{1}\nRaw:\n{2}\n".format(
str(exres), str(res), str([out])))
t1.close()
t2.close()
t3.close()
    # Test auto repositioning of bars when a bar is prematurely closed
# tqdm._instances.clear() # reset number of instances
with closing(StringIO()) as our_file:
t1 = tqdm(total=10, file=our_file, desc='pos0 bar', mininterval=0)
t2 = tqdm(total=10, file=our_file, desc='pos1 bar', mininterval=0)
t3 = tqdm(total=10, file=our_file, desc='pos2 bar', mininterval=0)
res = [m[0] for m in RE_pos.findall(our_file.getvalue())]
exres = ['\rpos0 bar: 0%',
'\n\rpos1 bar: 0%',
'\x1b[A\n\n\rpos2 bar: 0%',
'\x1b[A\x1b[A']
if res != exres:
raise AssertionError(
"\nExpected:\n{0}\nGot:\n{1}\n".format(
str(exres), str(res)))
t2.close()
t4 = tqdm(total=10, file=our_file, desc='pos3 bar', mininterval=0)
t1.update(1)
t3.update(1)
t4.update(1)
res = [m[0] for m in RE_pos.findall(our_file.getvalue())]
exres = ['\rpos0 bar: 0%',
'\n\rpos1 bar: 0%',
'\x1b[A\n\n\rpos2 bar: 0%',
'\x1b[A\x1b[A\n\x1b[A\n\n\rpos3 bar: 0%',
'\x1b[A\x1b[A\rpos0 bar: 10%',
'\n\rpos2 bar: 10%',
'\x1b[A\n\n\rpos3 bar: 10%',
'\x1b[A\x1b[A']
if res != exres:
raise AssertionError(
"\nExpected:\n{0}\nGot:\n{1}\n".format(
str(exres), str(res)))
t4.close()
t3.close()
t1.close()
@with_setup(pretest, posttest)
def test_set_description():
""" Test set description """
with closing(StringIO()) as our_file:
with tqdm(desc='Hello', file=our_file) as t:
assert t.desc == 'Hello: '
t.set_description('World')
assert t.desc == 'World: '
t.set_description()
assert t.desc == ''
@with_setup(pretest, posttest)
def test_deprecated_gui():
""" Test internal GUI properties """
# Check: StatusPrinter iff gui is disabled
with closing(StringIO()) as our_file:
t = tqdm(total=2, gui=True, file=our_file, miniters=1, mininterval=0)
assert not hasattr(t, "sp")
try:
t.update(1)
except TqdmDeprecationWarning as e:
if 'Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`' \
not in our_file.getvalue():
raise
else:
raise DeprecationError('Should not allow manual gui=True without'
' overriding __iter__() and update()')
finally:
t._instances.clear()
# t.close()
# len(tqdm._instances) += 1 # undo the close() decrement
t = tqdm(_range(3), gui=True, file=our_file,
miniters=1, mininterval=0)
try:
for _ in t:
pass
except TqdmDeprecationWarning as e:
if 'Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`' \
not in our_file.getvalue():
raise e
else:
raise DeprecationError('Should not allow manual gui=True without'
' overriding __iter__() and update()')
finally:
t._instances.clear()
# t.close()
# len(tqdm._instances) += 1 # undo the close() decrement
with tqdm(total=1, gui=False, file=our_file) as t:
assert hasattr(t, "sp")
@with_setup(pretest, posttest)
def test_cmp():
""" Test comparison functions """
with closing(StringIO()) as our_file:
t0 = tqdm(total=10, file=our_file)
t1 = tqdm(total=10, file=our_file)
t2 = tqdm(total=10, file=our_file)
assert t0 < t1
assert t2 >= t0
assert t0 <= t2
t3 = tqdm(total=10, file=our_file)
t4 = tqdm(total=10, file=our_file)
t5 = tqdm(total=10, file=our_file)
t5.close()
t6 = tqdm(total=10, file=our_file)
assert t3 != t4
assert t3 > t2
assert t5 == t6
t6.close()
t4.close()
t3.close()
t2.close()
t1.close()
t0.close()
@with_setup(pretest, posttest)
def test_repr():
""" Test representation """
with closing(StringIO()) as our_file:
with tqdm(total=10, ascii=True, file=our_file) as t:
assert str(t) == ' 0%| | 0/10 [00:00<?, ?it/s]'
@with_setup(pretest, posttest)
def test_clear():
""" Test clearing bar display """
with closing(StringIO()) as our_file:
t1 = tqdm(total=10, file=our_file, desc='pos0 bar',
bar_format='{l_bar}')
t2 = trange(10, file=our_file, desc='pos1 bar',
bar_format='{l_bar}')
before = squash_ctrlchars(our_file.getvalue())
t2.clear()
t1.clear()
after = squash_ctrlchars(our_file.getvalue())
t1.close()
t2.close()
assert before == ['pos0 bar: 0%|', 'pos1 bar: 0%|']
assert after == ['', '']
@with_setup(pretest, posttest)
def test_clear_disabled():
""" Test clearing bar display """
with closing(StringIO()) as our_file:
with tqdm(total=10, file=our_file, desc='pos0 bar', disable=True,
bar_format='{l_bar}') as t:
t.clear()
assert our_file.getvalue() == ''
@with_setup(pretest, posttest)
def test_refresh():
""" Test refresh bar display """
with closing(StringIO()) as our_file:
t1 = tqdm(total=10, file=our_file, desc='pos0 bar',
bar_format='{l_bar}', mininterval=999, miniters=999)
t2 = tqdm(total=10, file=our_file, desc='pos1 bar',
bar_format='{l_bar}', mininterval=999, miniters=999)
t1.update()
t2.update()
before = squash_ctrlchars(our_file.getvalue())
t1.refresh()
t2.refresh()
after = squash_ctrlchars(our_file.getvalue())
t1.close()
t2.close()
# Check that refreshing indeed forced the display to use realtime state
assert before == [u'pos0 bar: 0%|', u'pos1 bar: 0%|']
assert after == [u'pos0 bar: 10%|', u'pos1 bar: 10%|']
@with_setup(pretest, posttest)
def test_disabled_refresh():
""" Test refresh bar display """
with closing(StringIO()) as our_file:
with tqdm(total=10, file=our_file, desc='pos0 bar', disable=True,
bar_format='{l_bar}', mininterval=999, miniters=999) as t:
t.update()
t.refresh()
assert our_file.getvalue() == ''
@with_setup(pretest, posttest)
def test_write():
""" Test write messages """
s = "Hello world"
with closing(StringIO()) as our_file:
# Change format to keep only left part w/o bar and it/s rate
t1 = tqdm(total=10, file=our_file, desc='pos0 bar',
bar_format='{l_bar}', mininterval=0, miniters=1)
t2 = trange(10, file=our_file, desc='pos1 bar', bar_format='{l_bar}',
mininterval=0, miniters=1)
t3 = tqdm(total=10, file=our_file, desc='pos2 bar',
bar_format='{l_bar}', mininterval=0, miniters=1)
t1.update()
t2.update()
t3.update()
before = our_file.getvalue()
# Write msg and see if bars are correctly redrawn below the msg
t1.write(s, file=our_file) # call as an instance method
tqdm.write(s, file=our_file) # call as a class method
after = our_file.getvalue()
t1.close()
t2.close()
t3.close()
before_squashed = squash_ctrlchars(before)
after_squashed = squash_ctrlchars(after)
assert after_squashed == [s, s] + before_squashed
# Check that no bar clearing if different file
with closing(StringIO()) as our_file_bar:
with closing(StringIO()) as our_file_write:
t1 = tqdm(total=10, file=our_file_bar, desc='pos0 bar',
bar_format='{l_bar}', mininterval=0, miniters=1)
t1.update()
before_bar = our_file_bar.getvalue()
tqdm.write(s, file=our_file_write)
after_bar = our_file_bar.getvalue()
t1.close()
assert before_bar == after_bar
# Test stdout/stderr anti-mixup strategy
# Backup stdout/stderr
stde = sys.stderr
stdo = sys.stdout
# Mock stdout/stderr
with closing(StringIO()) as our_stderr:
with closing(StringIO()) as our_stdout:
sys.stderr = our_stderr
sys.stdout = our_stdout
t1 = tqdm(total=10, file=sys.stderr, desc='pos0 bar',
bar_format='{l_bar}', mininterval=0, miniters=1)
t1.update()
before_err = sys.stderr.getvalue()
before_out = sys.stdout.getvalue()
tqdm.write(s, file=sys.stdout)
after_err = sys.stderr.getvalue()
after_out = sys.stdout.getvalue()
t1.close()
assert before_err == '\rpos0 bar: 0%|\rpos0 bar: 10%|'
assert before_out == ''
after_err_res = [m[0] for m in RE_pos.findall(after_err)]
assert after_err_res == [u'\rpos0 bar: 0%',
u'\rpos0 bar: 10%',
u'\r ',
u'\r\r ',
u'\rpos0 bar: 10%']
assert after_out == s + '\n'
# Restore stdout and stderr
sys.stderr = stde
sys.stdout = stdo
@with_setup(pretest, posttest)
def test_len():
"""Test advance len (numpy array shape)"""
try:
import numpy as np
except:
raise SkipTest
with closing(StringIO()) as f:
with tqdm(np.zeros((3, 4)), file=f) as t:
assert len(t) == 3
@with_setup(pretest, posttest)
def test_autodisable_disable():
"""Test autodisable will disable on non-TTY"""
with closing(StringIO()) as our_file:
with tqdm(total=10, disable=None, file=our_file) as t:
t.update(3)
assert our_file.getvalue() == ''
@with_setup(pretest, posttest)
def test_autodisable_enable():
"""Test autodisable will not disable on TTY"""
with closing(StringIO()) as our_file:
setattr(our_file, "isatty", lambda: True)
with tqdm(total=10, disable=None, file=our_file) as t:
t.update()
assert our_file.getvalue() != ''
@with_setup(pretest, posttest)
def test_deprecation_exception():
def test_TqdmDeprecationWarning():
with closing(StringIO()) as our_file:
raise (TqdmDeprecationWarning('Test!',
fp_write=getattr(our_file, 'write',
sys.stderr.write)))
def test_TqdmDeprecationWarning_nofpwrite():
raise (TqdmDeprecationWarning('Test!', fp_write=None))
assert_raises(TqdmDeprecationWarning, test_TqdmDeprecationWarning)
assert_raises(Exception, test_TqdmDeprecationWarning_nofpwrite)
@with_setup(pretest, posttest)
def test_monitoring_thread():
# Note: should fix miniters for these tests, else with dynamic_miniters
# it's too complicated to handle with monitoring update and maxinterval...
maxinterval = 10
# 1- Configure and test the thread alone
# Setup a discrete timer
timer = DiscreteTimer()
TMonitor._time = timer.time
# And a fake sleeper
sleeper = FakeSleep(timer)
TMonitor._sleep = sleeper.sleep
# And a fake tqdm
class fake_tqdm(object):
_instances = []
    # Instantiate the monitor
monitor = TMonitor(fake_tqdm, maxinterval)
# Test if alive, then killed
assert monitor.report()
monitor.exit()
timer.sleep(maxinterval*2) # need to go out of the sleep to die
assert not monitor.report()
    # assert not monitor.is_alive()  # not working for an unknown reason; thread not killed
del monitor
# 2- Test for real with a tqdm instance that takes too long
total = 1000
# Setup a discrete timer
timer = DiscreteTimer()
# And a fake sleeper
sleeper = FakeSleep(timer)
# Setup TMonitor to use the timer
TMonitor._time = timer.time
TMonitor._sleep = sleeper.sleep
# Set monitor interval
tqdm.monitor_interval = maxinterval
with closing(StringIO()) as our_file:
with tqdm(total=total, file=our_file, miniters=500,
mininterval=0.1, maxinterval=maxinterval) as t:
cpu_timify(t, timer)
# Do a lot of iterations in a small timeframe
# (smaller than monitor interval)
timer.sleep(maxinterval/2) # monitor won't wake up
t.update(500)
# check that our fixed miniters is still there
assert t.miniters == 500
# Then do 1 it after monitor interval, so that monitor kicks in
timer.sleep(maxinterval*2)
t.update(1)
# Wait for the monitor to get out of sleep's loop and update tqdm..
timeend = timer.time()
while not (t.monitor.woken >= timeend and t.miniters == 1):
timer.sleep(1) # Force monitor to wake up if it woken too soon
sleep(0.000001) # sleep to allow interrupt (instead of pass)
assert t.miniters == 1 # check that monitor corrected miniters
# Note: at this point, there may be a race condition: monitor saved
# current woken time but timer.sleep() happen just before monitor
# sleep. To fix that, either sleep here or increase time in a loop
# to ensure that monitor wakes up at some point.
# Try again but already at miniters = 1 so nothing will be done
timer.sleep(maxinterval*2)
t.update(2)
timeend = timer.time()
while not (t.monitor.woken >= timeend):
timer.sleep(1) # Force monitor to wake up if it woken too soon
sleep(0.000001)
# Wait for the monitor to get out of sleep's loop and update tqdm..
assert t.miniters == 1 # check that monitor corrected miniters
# 3- Check that class var monitor is deleted if no instance left
assert tqdm.monitor is None
# 4- Test on multiple bars, one not needing miniters adjustment
total = 1000
# Setup a discrete timer
timer = DiscreteTimer()
# And a fake sleeper
sleeper = FakeSleep(timer)
# Setup TMonitor to use the timer
TMonitor._time = timer.time
TMonitor._sleep = sleeper.sleep
with closing(StringIO()) as our_file:
with tqdm(total=total, file=our_file, miniters=500,
mininterval=0.1, maxinterval=maxinterval) as t1:
# Set high maxinterval for t2 so monitor does not need to adjust it
with tqdm(total=total, file=our_file, miniters=500,
mininterval=0.1, maxinterval=1E5) as t2:
cpu_timify(t1, timer)
cpu_timify(t2, timer)
# Do a lot of iterations in a small timeframe
timer.sleep(5)
t1.update(500)
t2.update(500)
assert t1.miniters == 500
assert t2.miniters == 500
# Then do 1 it after monitor interval, so that monitor kicks in
timer.sleep(maxinterval*2)
t1.update(1)
t2.update(1)
# Wait for the monitor to get out of sleep and update tqdm
timeend = timer.time()
while not (t.monitor.woken >= timeend and t1.miniters == 1):
timer.sleep(1)
sleep(0.000001)
assert t1.miniters == 1 # check that monitor corrected miniters
assert t2.miniters == 500 # check that t2 was not adjusted
@with_setup(pretest, posttest)
def test_postfix():
"""Test postfix"""
postfix = {'float': 0.321034, 'gen': 543, 'str': 'h', 'lst': [2]}
    postfix_order = (('w', 'w'), ('a', 0))  # an OrderedDict is not needed; a tuple of pairs is fine
expected = ['float=0.321', 'gen=543', 'lst=[2]', 'str=h']
expected_order = ['w=w', 'a=0', 'float=0.321', 'gen=543', 'lst=[2]', 'str=h']
# Test postfix set at init
with closing(StringIO()) as our_file:
with tqdm(total=10, file=our_file, desc='pos0 bar',
bar_format='{r_bar}', postfix=postfix) as t1:
t1.refresh()
out = our_file.getvalue()
# Test postfix set after init
with closing(StringIO()) as our_file:
with trange(10, file=our_file, desc='pos1 bar',
bar_format='{r_bar}', postfix=None) as t2:
t2.set_postfix(**postfix)
t2.refresh()
out2 = our_file.getvalue()
# Order of items in dict may change, so need a loop to check per item
for res in expected:
assert res in out
assert res in out2
# Test postfix set after init and with ordered dict
with closing(StringIO()) as our_file:
with trange(10, file=our_file, desc='pos2 bar',
bar_format='{r_bar}', postfix=None) as t3:
t3.set_postfix(postfix_order, **postfix)
t3.refresh()
out3 = our_file.getvalue()
out3 = out3[1:-1].split(', ')[3:]
assert out3 == expected_order
|
the-stack_0_13172 | from google.cloud import storage
import googleapiclient.discovery
import shutil
import os
import time
from .function import port_open, post_slack
from .libraries import extra_libraries, important_libraries
from herpetologist import check_type
import subprocess
import cloudpickle
from typing import Callable, List
additional_command = [
'gsutil cp gs://general-bucket/dask.zip dask.zip',
'unzip dask.zip',
'worker_size=1 name=a project=a zone=a expired=99999 docker-compose -f docker-compose.yaml up --build',
]
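# Firewall rule opening the default Dask ports - 8786 (scheduler) and 8787
# (dashboard) - for ingress to instances tagged 'dask'.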
dask_network = {
'allowed': [{'IPProtocol': 'tcp', 'ports': ['8787', '8786']}],
'description': '',
'direction': 'INGRESS',
'kind': 'compute#firewall',
'name': 'dask-network',
'priority': 1000.0,
'sourceRanges': ['0.0.0.0/0'],
'targetTags': ['dask'],
}
@check_type
def build_image(
project: str,
zone: str,
bucket_name: str,
image_name: str,
family: str,
instance_name: str = 'build-dask-instance',
source_image: dict = {
'project': 'ubuntu-os-cloud',
'family': 'ubuntu-1804-lts',
},
storage_image: str = 'asia-southeast1',
webhook_function: Callable = post_slack,
validate_webhook: bool = True,
additional_libraries: List[str] = extra_libraries,
install_bash: str = None,
dockerfile: str = None,
**kwargs,
):
"""
Parameters
----------
project: str
project id
zone: str
bucket_name: str
bucket name to upload dask code, can be private.
image_name: str
image name for dask bootloader.
family: str
family name for built image
instance_name: str (default='build-dask-instance')
Start-up instance to build the image
source_image: dict (default={'project': 'ubuntu-os-cloud', 'family': 'ubuntu-1804-lts'})
Source image to start the instance for building the image
storage_image: str, (default='asia-southeast1')
storage location for dask image.
webhook_function: Callable, (default=post_slack)
Callable function to send alert during gracefully delete, default is post_slack.
validate_webhook: bool, (default=True)
if True, will validate `webhook_function`.
Not suggest to set it as False because this webhook_function will use during gracefully delete.
additional_libraries: List[str], (default=extra_libraries).
add more libraries from PYPI. This is necessary if want dask cluster able to necessary libraries.
install_bash: str, (default=None).
File path to custom start-up script to build disk image
dockerfile: List[str], (default=None).
File path to custom Dockerfile to build docker image
**kwargs:
Keyword arguments to pass to webhook_function.
"""
def nested_post(msg):
return webhook_function(msg, **kwargs)
if validate_webhook:
if nested_post('Testing from ondemand-dask') != 200:
raise Exception('`webhook_function` must returned 200.')
compute = googleapiclient.discovery.build('compute', 'v1')
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
this_dir = os.path.dirname(__file__)
pkl = os.path.join(this_dir, 'image', 'dask', 'post.pkl')
with open(pkl, 'wb') as fopen:
cloudpickle.dump(nested_post, fopen)
reqs = important_libraries + additional_libraries
reqs = list(set(reqs))
req = os.path.join(this_dir, 'image', 'dask', 'requirements.txt')
with open(req, 'w') as fopen:
fopen.write('\n'.join(reqs))
if dockerfile:
with open(dockerfile, 'r') as fopen:
script = fopen.read()
dockerfile_path = os.path.join(this_dir, 'image', 'dask', 'Dockerfile')
with open(dockerfile_path, 'w') as fopen:
fopen.write(script)
image = os.path.join(this_dir, 'image')
shutil.make_archive('dask', 'zip', image)
blob = bucket.blob('dask.zip')
blob.upload_from_filename('dask.zip')
os.remove('dask.zip')
image_response = (
compute.images()
.getFromFamily(**source_image)
.execute()
)
source_disk_image = image_response['selfLink']
try:
print('Creating `dask-network` firewall rule.')
compute.firewalls().insert(
project = project, body = dask_network
).execute()
print('Done.')
except:
print('`dask-network` exists.')
machine_type = f'zones/{zone}/machineTypes/n1-standard-1'
if install_bash is None:
install_bash = 'install.sh'
install_bash = os.path.join(this_dir, install_bash)
startup_script = open(install_bash).read()
startup_script = '\n'.join(
startup_script.split('\n') + additional_command
).replace('general-bucket', bucket_name)
config = {
'name': instance_name,
'tags': {'items': ['dask']},
'machineType': machine_type,
'disks': [
{
'boot': True,
'autoDelete': True,
'initializeParams': {'sourceImage': source_disk_image},
}
],
'networkInterfaces': [
{
'network': 'global/networks/default',
'accessConfigs': [
{'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}
],
}
],
'serviceAccounts': [
{
'email': 'default',
'scopes': [
'https://www.googleapis.com/auth/devstorage.read_write',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/compute',
],
}
],
'metadata': {
'items': [
{'key': 'startup-script', 'value': startup_script},
{'key': 'bucket', 'value': bucket_name},
]
},
}
operation = (
compute.instances()
.insert(project = project, zone = zone, body = config)
.execute()
)
print(f'Waiting instance `{instance_name}` to run.')
while True:
result = (
compute.zoneOperations()
.get(project = project, zone = zone, operation = operation['name'])
.execute()
)
if result['status'] == 'DONE':
if 'error' in result:
raise Exception(result['error'])
else:
print('Done.')
break
time.sleep(1)
print('Waiting IP Address to check health.')
while True:
result = (
compute.instances().list(project = project, zone = zone).execute()
)
results = result['items'] if 'items' in result else None
dask = [r for r in results if r['name'] == instance_name]
if len(dask) > 0:
dask = dask[0]
ip_address = dask['networkInterfaces'][0]['accessConfigs'][0][
'natIP'
]
print(f'Got it, Public IP: {ip_address}')
break
time.sleep(2)
print('Waiting Dask cluster to run.')
while True:
if port_open(ip_address, 8786) and port_open(ip_address, 8787):
print('Done.')
break
time.sleep(5)
compute = googleapiclient.discovery.build('compute', 'v1')
print(f'Deleting image `{image_name}` if exists.')
try:
compute.images().delete(project = project, image = image_name).execute()
print('Done.')
except:
pass
# give a rest to gcp API before build the image.
time.sleep(20)
print(f'Building image `{image_name}`.')
try:
o = subprocess.check_output(
[
'gcloud',
'compute',
'images',
'create',
image_name,
'--source-disk',
instance_name,
'--source-disk-zone',
zone,
'--family',
family,
'--storage-location',
storage_image,
'--force',
],
stderr = subprocess.STDOUT,
)
print('Done.')
except subprocess.CalledProcessError as e:
print(e.output.decode('utf-8'))
raise
print(f'Deleting instance `{instance_name}`.')
compute = googleapiclient.discovery.build('compute', 'v1')
compute.instances().delete(
project = project, zone = zone, instance = instance_name
).execute()
print('Done.')
return True
|
the-stack_0_13174 | import contextlib
import glob
import json
import os
import sys
from sqlalchemy.orm import scoped_session
import get
import db
from flask import *
app = Flask(__name__)
@contextlib.contextmanager
def working_directory(path):
prev_cwd = os.getcwd()
os.chdir(path)
yield
os.chdir(prev_cwd)
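# For instance (mirroring the usage further below):
#   with working_directory('./static/pixiv'):
#       files = glob.glob('*.json')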
# Get files
#with working_directory('./static/pixiv'):
# posts = glob.glob('*.png') + glob.glob('*.jpg')
dbsession = scoped_session(db.Session())
if db.isempty():
print("downloading")
get.main()
with working_directory('./static/pixiv'):
ids = glob.glob('*.json')
print(str(ids))
ids = map(lambda x : x.split('.')[0], ids)
for id in ids:
db.addpixiv(id)
print(db.session.query(db.Posts).all())
#posts = map(addition, numbers)
@app.route('/')
def hello_world():
return render_template("hello.html", posts=map(lambda x : x.file_name,db.session.query(db.Posts).all()))
@app.route('/download_pixiv', methods=['GET'])
def presentpixiv():
return """<!DOCTYPE html>
<html>
<body>
<form action="/download_pixiv" method="post" >
<label for="fname">pixiv id:</label><br>
<input type="text" id="pixiv_id" name="pixiv_id" value="20"><br>
<input type="submit" value="Submit">
</form>
<p>If you click the "Submit" button, the form-data will be sent to a page called "/download_pixiv".</p>
</body>
</html>"""
@app.route('/download_pixiv', methods=['POST'])
def downloadpixiv():
id = request.form.get('pixiv_id')
    get.download(id)
db.addpixiv(id)
return "done"
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080)
|
the-stack_0_13176 | from ._builtin import Page, WaitPage
import random
from exp.util import Participant
from exp.payment import PaymentMethod, MethodThreeResults, MethodOneResults, MethodTwoResults
from exp.lottery import Lottery
class FinalPayoffResults(Page):
def vars_for_template(self):
experiment = Participant.get_experiment(self.player)
method_one = Participant.get_payment_one_results(self.player)
method_two = Participant.get_payment_two_results(self.player)
method_three = Participant.get_payment_three_results(self.player)
part_one_earnings = method_one.earnings + method_two.earnings
part_one_payoff = experiment.PART_ONE_WEIGHT*part_one_earnings*experiment.CONVERSION_RATE
part_two_payoff = experiment.PART_TWO_WEIGHT*method_three.earnings*experiment.CONVERSION_RATE
final_payoff = experiment.SHOW_UP_FEE + experiment.ENDOWMENT + part_one_payoff + part_two_payoff
return {
'show_up_fee': experiment.SHOW_UP_FEE,
'endowment': experiment.ENDOWMENT,
'rate': experiment.CONVERSION_RATE,
'method_1': round(method_one.earnings, 2),
'method_2': round(method_two.earnings, 2),
'method_3': round(method_three.earnings, 2),
'total_in_credits': round(part_one_earnings, 2),
'earnings_1': round(part_one_payoff, 2),
'earnings_2': round(part_two_payoff, 2),
'final_payoff': round(final_payoff, 2),
}
class MethodOneResultsPage(Page):
def vars_for_template(self):
experiment = Participant.get_experiment(self.player)
results = Participant.get_payment_one_results(self.player)
random_position = 'Left' if results.left_auction.aid == results.auction.aid else 'Right'
if results.preferred_position == experiment.phase_one.LEFT:
preferred_position = 'Left'
elif results.preferred_position == experiment.phase_one.RIGHT:
preferred_position = 'Right'
else:
preferred_position = 'Indifferent'
if results.random_signal_is_percentage:
random_signal = round(results.random_signal * 100, 2)
else:
random_signal = results.random_signal
if results.other_random_signal_is_percentage:
others_random_signal = round(results.other_random_signal * 100, 2)
else:
others_random_signal = results.other_random_signal
return {
'player_id': results.player_id,
'other_id': results.other_player_id,
'preferred_position': preferred_position,
'left_auction': results.left_auction,
'right_auction': results.right_auction,
'auction': results.auction,
'random_position': random_position,
'bid': results.bid,
'others_bid': results.other_bid,
'winner': results.lottery_won,
'signal_is_percentage': results.random_signal_is_percentage,
'signal': random_signal,
'others_signal': others_random_signal,
'others_signal_is_percentage': results.other_random_signal_is_percentage,
'low_value': results.low_value,
'high_value': results.high_value,
'low_prob': round(results.low_prob * 100, 2),
'high_prob': round(results.high_prob * 100, 2),
'high_chosen': results.high_prize_chosen,
'earnings': results.earnings,
'realized': results.realized,
'auction_type': results.auction.atype,
'low_prize_chosen': results.low_prize_chosen,
'high_prize_chosen': results.high_prize_chosen,
}
class MethodTwoResultsPage(Page):
def vars_for_template(self):
results = Participant.get_payment_two_results(self.player)
context = {
'player_id': results.player_id,
'other_id': results.other_player_id,
'cutoff_auction': results.auction,
'cutoff': results.cutoff,
'random_offer': round(results.random_offer, 2),
'offer_accepted': results.offer_accepted,
}
if not results.offer_accepted:
if results.random_signal_is_percentage:
random_signal = round(results.random_signal * 100, 2)
else:
random_signal = int(results.random_signal)
if results.other_random_signal_is_percentage:
others_random_signal = round(results.other_random_signal * 100, 2)
else:
others_random_signal = int(results.other_random_signal)
context.update({
'auction': results.auction,
'bid': results.bid,
'others_bid': results.other_bid,
'winner': results.lottery_won,
'signal': random_signal,
'others_signal': others_random_signal,
'signal_is_percentage': results.random_signal_is_percentage,
'others_signal_is_percentage': results.other_random_signal_is_percentage,
'low_value': results.low_value,
'high_value': results.high_value,
'low_prob': results.low_prob * 100,
'high_prob': results.high_prob * 100,
'high_chosen': results.high_prize_chosen,
'earnings': results.earnings,
'realized': results.realized,
'auction_type': results.auction.atype,
'low_prize_chosen': results.low_prize_chosen,
'high_prize_chosen': results.high_prize_chosen,
})
return context
class MethodThreeResultsPage(Page):
def vars_for_template(self):
results = Participant.get_payment_three_results(self.player)
context = {
'rolled_side': results.rolled_side,
'rolled_side_encoded': results.rolled_side_encoded,
'die_encoding': results.die_encoding,
'bet_color': Lottery.BET_HIGH_RED if results.bet_color == Lottery.BET_HIGH_RED else Lottery.BET_HIGH_BLUE,
'bet_high_red': Lottery.BET_HIGH_RED,
'bet_high_blue': Lottery.BET_HIGH_BLUE,
'high_value': results.high_value,
'low_value': results.low_value,
'lottery': results.lottery,
'lottery_type': results.lottery.ltype,
'cutoff': results.cutoff,
'random_cutoff': results.random_cutoff,
'play_lottery': results.play_lottery,
'num_red': results.num_red,
'num_blue': results.num_blue,
'realized_value': results.realized_value,
'earnings': results.earnings
}
return context
class ResultsWaitPage(WaitPage):
def after_all_players_arrive(self):
players = self.group.get_players()[:]
for i, player in enumerate(players):
player_id = player.participant.id_in_session
others = players[:i] + players[i + 1:]
other_player = random.choice(others)
other_id = other_player.participant.id_in_session
experiment = Participant.get_experiment(player)
other_experiment = Participant.get_experiment(other_player)
payment_method = PaymentMethod(player_id, other_id, experiment, other_experiment)
method_one_results = payment_method.method_one_payment(MethodOneResults())
method_two_results = payment_method.method_two_payment(MethodTwoResults())
method_three_results = payment_method.method_three_results(MethodThreeResults())
Participant.set_payment_one_results(player, method_one_results)
Participant.set_payment_two_results(player, method_two_results)
Participant.set_payment_three_results(player, method_three_results)
part_one_earnings = method_one_results.earnings + method_two_results.earnings
part_one_payoff = experiment.PART_ONE_WEIGHT*part_one_earnings*experiment.CONVERSION_RATE
part_two_payoff = experiment.PART_TWO_WEIGHT*method_three_results.earnings*experiment.CONVERSION_RATE
final_payoff = experiment.SHOW_UP_FEE + experiment.ENDOWMENT + part_one_payoff + part_two_payoff
player.payoff = final_payoff
player.save_results(method_one_results, method_two_results, method_three_results)
page_sequence = [
ResultsWaitPage,
MethodOneResultsPage,
MethodTwoResultsPage,
MethodThreeResultsPage,
FinalPayoffResults
]
|
the-stack_0_13177 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def inorderTraversal(self, root: TreeNode) -> List[int]:
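        # Iterative inorder traversal using an explicit stack of (node, flag)
        # pairs: flag 0 means the node's children still have to be expanded,
        # flag 1 means the node's value is ready to be emitted.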
stack = []
res = []
if not root:
return []
if root.right:
stack.append((root.right,0))
if root:
stack.append((root,1))
if root.left:
stack.append((root.left,0))
while stack:
node = stack.pop()
if node[1]==0:
if node[0].right:
stack.append((node[0].right,0))
if node[0].left:
stack.append((node[0],1))
stack.append((node[0].left,0))
else:
res.append(node[0].val)
else:
res.append(node[0].val)
return res
""" recursive
if not root:
return []
res =[]
if(root.left):
res.extend(self.inorderTraversal(root.left))
res.append(root.val)
if(root.right):
res.extend(self.inorderTraversal(root.right))
return res
"""
|
the-stack_0_13178 | import requests
from variables import rapidApiKey
def ipLocation(ipAddress):
url = "https://ip-location5.p.rapidapi.com/get_geo_info"
payload = "ip="+ipAddress
headers = {
'content-type': "application/x-www-form-urlencoded",
'x-rapidapi-key': rapidApiKey,
'x-rapidapi-host': "ip-location5.p.rapidapi.com"
}
response = requests.request("POST", url, data=payload, headers=headers)
dataResponse = response.json()
# print(response.text)
neededData = ['ip','region','city','latitude','longitude']
importantData = []
for data in neededData:
importantData.append(dataResponse[data])
# print(neededData)
# print(importantData)
return response
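# Illustrative call (hypothetical IP, matching the sample response below; a valid
# RapidAPI key must be present in variables.py). Note that the function returns
# the raw requests.Response, while `importantData` is built but not returned:
#   resp = ipLocation("45.16.197.205")
#   print(resp.json())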
"""
{
"ip":"45.16.197.205"
"continent":{2 items
"code":"NA"
"name":"North America"
}
"country":{5 items
"code":"US"
"name":"United States"
"capital":"Washington"
"currency":"USD"
"phone-code":"1"
}
"region":"Texas"
"city":"Richardson"
"latitude":32.9483
"longitude":-96.7299
}
"""
|
the-stack_0_13181 | class ExifFormat:
def __init__(self, id, name, size, short_name):
self.id = id
self.name = name
self.size = size
self.short_name = short_name # used with struct.unpack()
exif_formats = {
1: ExifFormat(1, 'unsigned byte', 1, 'B'),
2: ExifFormat(2, 'ascii string', 1, 's'),
3: ExifFormat(3, 'unsigned short', 2, 'H'),
4: ExifFormat(4, 'unsigned long', 4, 'L'),
5: ExifFormat(5, 'unsigned rational', 8, ''),
6: ExifFormat(6, 'signed byte', 1, 'b'),
7: ExifFormat(7, 'undefined', 1, 'B'), # consider `undefined` as `unsigned byte`
8: ExifFormat(8, 'signed short', 2, 'h'),
9: ExifFormat(9, 'signed long', 4, 'l'),
10: ExifFormat(10, 'signed rational', 8, ''),
11: ExifFormat(11, 'single float', 4, 'f'),
12: ExifFormat(12, 'double float', 8, 'd'),
}
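# Illustrative usage sketch (not part of the original module): shows how short_name is
# meant to plug into struct.unpack. The byte string and the little-endian byte order
# below are assumptions made only for this example.
if __name__ == "__main__":
    import struct
    fmt = exif_formats[3]  # unsigned short, 2 bytes, struct code 'H'
    value, = struct.unpack('<' + fmt.short_name, b'\x2a\x00')
    print(fmt.name, value)  # unsigned short 42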
|
the-stack_0_13182 | # %%
#
example = """123 -> x
456 -> y
x AND y -> d
x OR y -> e
x LSHIFT 2 -> f
y RSHIFT 2 -> g
NOT x -> h
NOT y -> i"""
def load(text):
    # connections to each wire, either an integer or a formula
    cn = dict()
    for line in text.splitlines():
        expr, output = line.split(' -> ')
        cn[output] = expr
    return cn
def solve_memo(cons, var):
if var in cons and isinstance(cons[var], int):
return cons[var]
v = solve1(cons, var)
cons[var] = v
print(f'{var} = {v}')
return v
def solve1(cons, var):
try:
return int(var)
except ValueError:
pass
expr = cons[var]
try:
return int(expr)
except ValueError:
pass
w = expr.split()
if len(w) == 1 and w[0] in cons:
return solve_memo(cons, w[0])
# print(w)
if w[0] == 'NOT':
assert len(w) == 2
return 65535 ^ solve_memo(cons, w[1])
op = w[1]
assert len(w) == 3
f = {'AND':
lambda x, y: x & y,
'OR': lambda x, y: x | y,
'LSHIFT': lambda x, y: (x << y) & 0xffff,
'RSHIFT': lambda x, y: (x >> y)
}
return f[op](solve_memo(cons, w[0]), solve_memo(cons, w[2]))
# %%
cons = load(example)
assert solve1(cons, 'd') == 72
assert solve1(cons, 'e') == 507
assert solve1(cons, 'f') == 492
assert solve1(cons, 'g') == 114
assert solve1(cons, 'h') == 65412
assert solve1(cons, 'i') == 65079
assert solve1(cons, 'x') == 123
assert solve1(cons, 'y') == 456
# %%
cons = load(open('../input/07.txt').read())
print(solve_memo(cons, 'a'))
# %%
# Part 2
cons = load(open('../input/07.txt').read())
cons['b'] = 16076
print(solve_memo(cons, 'a'))
# %%
|
the-stack_0_13183 | import tensorflow as tf
from . import CustomDropout
from tensorflow.compat.v1.keras.layers import CuDNNLSTM
class BaselineModel(tf.keras.Model):
def __init__(self, input_size, slot_size, intent_size, layer_size=128):
super(BaselineModel, self).__init__()
self.embedding = tf.keras.layers.Embedding(input_size, layer_size)
self.bilstm = tf.keras.layers.Bidirectional(CuDNNLSTM(layer_size, return_sequences=True, return_state=True))
self.dropout = CustomDropout.CustomDropout(0.5)
self.intent_out = tf.keras.layers.Dense(intent_size, activation=None)
self.slot_out = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(slot_size, activation=None))
@tf.function
def call(self, inputs, sequence_length, isTraining=True):
x = self.embedding(inputs)
state_outputs, forward_h, forward_c, backward_h, backward_c = self.bilstm(x)
state_outputs = self.dropout(state_outputs, isTraining)
forward_h = self.dropout(forward_h, isTraining)
backward_h = self.dropout(backward_h, isTraining)
final_state = tf.keras.layers.concatenate([forward_h, backward_h])
intent = self.intent_out(final_state)
slots = self.slot_out(state_outputs)
outputs = [slots, intent]
return outputs |
the-stack_0_13185 | import sys
from numpy.distutils.core import Extension, setup
__author__ = "Lars Andersen Bratholm"
__copyright__ = "Copyright 2017"
__credits__ = ["Lars Andersen Bratholm (2017) https://github.com/larsbratholm/fns"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Lars Andersen Bratholm"
__email__ = "[email protected]"
__status__ = "Alpha"
__description__ = "Furthest Neighbour Search"
__url__ = "https://github.com/larsbratholm/fns"
FORTRAN = "f90"
# GNU (default)
COMPILER_FLAGS = ["-fopenmp", "-m64", "-march=native", "-fPIC", "-Ofast", "-ffast-math", "-funroll-loops",
"-Wno-maybe-uninitialized", "-Wno-unused-function", "-Wno-cpp"]#, "-fcheck=all"]
LINKER_FLAGS = ["-L/usr/include","-L/include","-I/usr/include","-I/include","-lgomp"]
MATH_LINKER_FLAGS = ["-lblas", "-llapack"]
# For clang without OpenMP: (i.e. most Apple/mac system)
if sys.platform == "darwin" and all(["gnu" not in arg for arg in sys.argv]):
COMPILER_FLAGS = ["-O3", "-m64", "-march=native", "-fPIC"]
LINKER_FLAGS = []
MATH_LINKER_FLAGS = ["-lblas", "-llapack"]
# Intel
if any(["intelem" in arg for arg in sys.argv]):
COMPILER_FLAGS = ["-xHost", "-O3", "-axAVX", "-qopenmp"]
LINKER_FLAGS = ["-liomp5", " -lpthread", "-lm", "-ldl"]
MATH_LINKER_FLAGS = ["-L${MKLROOT}/lib/intel64", "-lmkl_rt"]
# UNCOMMENT TO FORCE LINKING TO MKL with GNU compilers:
# LINKER_FLAGS = ["-lgomp", " -lpthread", "-lm", "-ldl"]
# MATH_LINKER_FLAGS = ["-L${MKLROOT}/lib/intel64", "-lmkl_rt"]
ext_ffn = Extension(name = 'ffn',
sources = ['fns/ffn.f90'],
extra_f90_compile_args = COMPILER_FLAGS,
extra_f77_compile_args = COMPILER_FLAGS,
extra_compile_args = COMPILER_FLAGS,
extra_link_args = LINKER_FLAGS,
language = FORTRAN,
f2py_options=['--quiet'])
# use README.md as long description
def readme():
with open('README.md') as f:
return f.read()
def setup_pepytools():
setup(
name="fns",
packages=['fns'],
# metadata
version=__version__,
author=__author__,
author_email=__email__,
platforms = 'Any',
description = __description__,
long_description = readme(),
keywords = ['Furthest Neighbour'],
classifiers = [],
url = __url__,
# set up package contents
ext_package = 'fns',
ext_modules = [
ext_ffn,
],
)
if __name__ == '__main__':
setup_pepytools()
|
the-stack_0_13186 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class lsnpool_lsnip_binding(base_resource) :
"""Binding class showing the lsnip that can be bound to lsnpool."""
def __init__(self) :
self._lsnip = ""
self._poolname = ""
self.___count = 0
@property
def poolname(self) :
"""Name for the LSN pool. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the LSN pool is created. The following requirement applies only to the NetScaler CLI: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "lsn pool1" or 'lsn pool1').<br/>Minimum length = 1<br/>Maximum length = 127."""
try :
return self._poolname
except Exception as e:
raise e
@poolname.setter
def poolname(self, poolname) :
"""Name for the LSN pool. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the LSN pool is created. The following requirement applies only to the NetScaler CLI: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "lsn pool1" or 'lsn pool1').<br/>Minimum length = 1<br/>Maximum length = 127
:param poolname:
"""
try :
self._poolname = poolname
except Exception as e:
raise e
@property
def lsnip(self) :
"""IPv4 address or a range of IPv4 addresses to be used as NAT IP address(es) for LSN.
After the pool is created, these IPv4 addresses are added to the NetScaler ADC as NetScaler owned IP address of type LSN. A maximum of 4096 IP addresses can be bound to an LSN pool. An LSN IP address associated with an LSN pool cannot be shared with other LSN pools. IP addresses specified for this parameter must not already exist on the NetScaler ADC as any NetScaler owned IP addresses. In the command line interface, separate the range with a hyphen. For example: 10.102.29.30-10.102.29.189. You can later remove some or all the LSN IP addresses from the pool, and add IP addresses to the LSN pool.
.<br/>Minimum length = 1.
"""
try :
return self._lsnip
except Exception as e:
raise e
@lsnip.setter
def lsnip(self, lsnip) :
"""IPv4 address or a range of IPv4 addresses to be used as NAT IP address(es) for LSN.
After the pool is created, these IPv4 addresses are added to the NetScaler ADC as NetScaler owned IP address of type LSN. A maximum of 4096 IP addresses can be bound to an LSN pool. An LSN IP address associated with an LSN pool cannot be shared with other LSN pools. IP addresses specified for this parameter must not already exist on the NetScaler ADC as any NetScaler owned IP addresses. In the command line interface, separate the range with a hyphen. For example: 10.102.29.30-10.102.29.189. You can later remove some or all the LSN IP addresses from the pool, and add IP addresses to the LSN pool.
.<br/>Minimum length = 1
:param lsnip:
"""
try :
self._lsnip = lsnip
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(lsnpool_lsnip_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lsnpool_lsnip_binding
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
if self.poolname is not None :
return str(self.poolname)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
updateresource = lsnpool_lsnip_binding()
updateresource.poolname = resource.poolname
updateresource.lsnip = resource.lsnip
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [lsnpool_lsnip_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].poolname = resource[i].poolname
updateresources[i].lsnip = resource[i].lsnip
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
deleteresource = lsnpool_lsnip_binding()
deleteresource.poolname = resource.poolname
deleteresource.lsnip = resource.lsnip
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [lsnpool_lsnip_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].poolname = resource[i].poolname
deleteresources[i].lsnip = resource[i].lsnip
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, poolname) :
"""Use this API to fetch lsnpool_lsnip_binding resources.
:param service:
:param poolname:
"""
try :
obj = lsnpool_lsnip_binding()
obj.poolname = poolname
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, poolname, filter_) :
"""Use this API to fetch filtered set of lsnpool_lsnip_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param service:
:param poolname:
:param filter_:
"""
try :
obj = lsnpool_lsnip_binding()
obj.poolname = poolname
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
    def count(cls, service, poolname) :
        """Use this API to count lsnpool_lsnip_binding resources configured on NetScaler.
:param service:
:param poolname:
"""
try :
obj = lsnpool_lsnip_binding()
obj.poolname = poolname
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, poolname, filter_) :
"""Use this API to count the filtered set of lsnpool_lsnip_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param service:
:param poolname:
:param filter_:
"""
try :
obj = lsnpool_lsnip_binding()
obj.poolname = poolname
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class lsnpool_lsnip_binding_response(base_response) :
""" """
def __init__(self, length=1) :
self.lsnpool_lsnip_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lsnpool_lsnip_binding = [lsnpool_lsnip_binding() for _ in range(length)]
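# Illustrative usage sketch (not part of the original SDK module). It assumes an
# already-authenticated NITRO session object (here called `client`) and a hypothetical
# pool name; both are placeholders, not values taken from this file.
#
#     bindings = lsnpool_lsnip_binding.get(client, "lsn_pool1")
#     for b in bindings:
#         print(b.poolname, b.lsnip)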
|
the-stack_0_13187 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
from tornado.concurrent import TracebackFuture, is_future
from tornado.log import app_log, gen_log
from tornado import stack_context
from tornado.util import Configurable
from tornado.util import errno_from_exception
try:
import signal
except ImportError:
signal = None
try:
import thread # py2
except ImportError:
import _thread as thread # py3
from tornado.platform.auto import set_close_exec, Waker
_POLL_TIMEOUT = 3600.0
class TimeoutError(Exception):
pass
class IOLoop(Configurable):
"""A level-triggered I/O loop.
We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
are available, or else we fall back on select(). If you are
implementing a system that needs to handle thousands of
simultaneous connections, you should use a system that supports
either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server::
import errno
import functools
import ioloop
import socket
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error, e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", port))
sock.listen(128)
io_loop = ioloop.IOLoop.instance()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# Global lock for creating global IOLoop instance
_instance_lock = threading.Lock()
_current = threading.local()
@staticmethod
def instance():
"""Returns a global `IOLoop` instance.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. To get the current thread's `IOLoop`, use `current()`.
"""
if not hasattr(IOLoop, "_instance"):
with IOLoop._instance_lock:
if not hasattr(IOLoop, "_instance"):
# New instance after double check
IOLoop._instance = IOLoop()
return IOLoop._instance
@staticmethod
def initialized():
"""Returns true if the singleton instance has been created."""
return hasattr(IOLoop, "_instance")
def install(self):
"""Installs this `IOLoop` object as the singleton instance.
This is normally not necessary as `instance()` will create
an `IOLoop` on demand, but you may want to call `install` to use
a custom subclass of `IOLoop`.
"""
assert not IOLoop.initialized()
IOLoop._instance = self
@staticmethod
def clear_instance():
"""Clear the global `IOLoop` instance.
.. versionadded:: 4.0
"""
if hasattr(IOLoop, "_instance"):
del IOLoop._instance
@staticmethod
def current():
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as current
by `make_current`, returns that instance. Otherwise returns
`IOLoop.instance()`, i.e. the main thread's `IOLoop`.
A common pattern for classes that depend on ``IOLoops`` is to use
a default argument to enable programs with multiple ``IOLoops``
but not require the argument for simpler applications::
class MyClass(object):
def __init__(self, io_loop=None):
self.io_loop = io_loop or IOLoop.current()
In general you should use `IOLoop.current` as the default when
constructing an asynchronous object, and use `IOLoop.instance`
when you mean to communicate to the main thread from a different
one.
"""
current = getattr(IOLoop._current, "instance", None)
if current is None:
return IOLoop.instance()
return current
def make_current(self):
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
        `make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
"""
IOLoop._current.instance = self
@staticmethod
def clear_current():
IOLoop._current.instance = None
@classmethod
def configurable_base(cls):
return IOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
def initialize(self):
pass
def close(self, all_fds=False):
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
           for "file descriptors", those objects will have their
           ``close`` method called when ``all_fds`` is true.
"""
raise NotImplementedError()
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` method (and optionally a
``close()`` method, which may be called when the `IOLoop` is shut
down).
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def update_handler(self, fd, events):
"""Changes the events we listen for ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def remove_handler(self, fd):
"""Stop listening for events on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def set_blocking_signal_threshold(self, seconds, action):
"""Sends a signal if the `IOLoop` is blocked for more than
``s`` seconds.
Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
platform.
The action parameter is a Python signal handler. Read the
documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is
blocked for too long.
"""
raise NotImplementedError()
def set_blocking_log_threshold(self, seconds):
"""Logs a stack trace if the `IOLoop` is blocked for more than
``s`` seconds.
Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)``
"""
self.set_blocking_signal_threshold(seconds, self.log_stack)
def log_stack(self, signal, frame):
"""Signal handler to log the stack trace of the current thread.
For use with `set_blocking_signal_threshold`.
"""
gen_log.warning('IOLoop blocked for %f seconds in\n%s',
self._blocking_signal_threshold,
''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def _setup_logging(self):
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any([logging.getLogger().handlers,
logging.getLogger('tornado').handlers,
logging.getLogger('tornado.application').handlers]):
logging.basicConfig()
def stop(self):
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
To use asynchronous methods from otherwise-synchronous code (such as
unit tests), you can start and stop the event loop like this::
ioloop = IOLoop()
async_method(ioloop=ioloop, callback=ioloop.stop)
ioloop.start()
``ioloop.start()`` will return after ``async_method`` has run
its callback, whether that callback was invoked before or
after ``ioloop.start``.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func, timeout=None):
"""Starts the `IOLoop`, runs the given function, and stops the loop.
If the function returns a `.Future`, the `IOLoop` will run
until the future is resolved. If it raises an exception, the
`IOLoop` will stop and the exception will be re-raised to the
caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `TimeoutError` is raised.
This method is useful in conjunction with `tornado.gen.coroutine`
to allow asynchronous calls in a ``main()`` function::
@gen.coroutine
def main():
# do stuff...
if __name__ == '__main__':
IOLoop.instance().run_sync(main)
"""
future_cell = [None]
def run():
try:
result = func()
except Exception:
future_cell[0] = TracebackFuture()
future_cell[0].set_exc_info(sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
future_cell[0] = TracebackFuture()
future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
if not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result()
def time(self):
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
By default, the `IOLoop`'s time function is `time.time`. However,
it may be configured to use e.g. `time.monotonic` instead.
Calls to `add_timeout` that pass a number instead of a
`datetime.timedelta` should use this function to compute the
appropriate time, so they can work no matter what time function
is chosen.
"""
return time.time()
def add_timeout(self, deadline, callback):
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
"""
raise NotImplementedError()
def remove_timeout(self, timeout):
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
def add_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
"""
raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
Callbacks added with this method will be run without any
`.stack_context`, to avoid picking up the context of the function
that was interrupted by the signal.
"""
raise NotImplementedError()
def add_future(self, future, callback):
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
"""
assert is_future(future)
callback = stack_context.wrap(callback)
future.add_done_callback(
lambda future: self.add_callback(callback, future))
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
callback()
except Exception:
self.handle_callback_exception(callback)
def handle_callback_exception(self, callback):
"""This method is called whenever a callback run by the `IOLoop`
throws an exception.
By default simply logs the exception as an error. Subclasses
may override this method to customize reporting of exceptions.
The exception itself is not passed explicitly, but is available
in `sys.exc_info`.
"""
app_log.error("Exception in callback %r", callback, exc_info=True)
def split_fd(self, fd):
"""Returns an (fd, obj) pair from an ``fd`` parameter.
We accept both raw file descriptors and file-like objects as
input to `add_handler` and related methods. When a file-like
object is passed, we must retain the object itself so we can
close it correctly when the `IOLoop` shuts down, but the
poller interfaces favor file descriptors (they will accept
file-like objects and call ``fileno()`` for you, but they
always return the descriptor itself).
This method is provided for use by `IOLoop` subclasses and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
return fd.fileno(), fd
except AttributeError:
return fd, fd
def close_fd(self, fd):
"""Utility method to close an ``fd``.
If ``fd`` is a file-like object, we close it directly; otherwise
we use `os.close`.
This method is provided for use by `IOLoop` subclasses (in
implementations of ``IOLoop.close(all_fds=True)`` and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
try:
fd.close()
except AttributeError:
os.close(fd)
except OSError:
pass
class PollIOLoop(IOLoop):
"""Base class for IOLoops built around a select-like function.
For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
(Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
`tornado.platform.select.SelectIOLoop` (all platforms).
"""
def initialize(self, impl, time_func=None):
super(PollIOLoop, self).initialize()
self._impl = impl
if hasattr(self._impl, 'fileno'):
set_close_exec(self._impl.fileno())
self.time_func = time_func or time.time
self._handlers = {}
self._events = {}
self._callbacks = []
self._callback_lock = threading.Lock()
self._timeouts = []
self._cancellations = 0
self._running = False
self._stopped = False
self._closing = False
self._thread_ident = None
self._blocking_signal_threshold = None
self._timeout_counter = itertools.count()
# Create a pipe that we send bogus data to when we want to wake
# the I/O loop when it is idle
self._waker = Waker()
self.add_handler(self._waker.fileno(),
lambda fd, events: self._waker.consume(),
self.READ)
def close(self, all_fds=False):
with self._callback_lock:
self._closing = True
self.remove_handler(self._waker.fileno())
if all_fds:
for fd, handler in self._handlers.values():
self.close_fd(fd)
self._waker.close()
self._impl.close()
self._callbacks = None
self._timeouts = None
def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd)
self._handlers[fd] = (obj, stack_context.wrap(handler))
self._impl.register(fd, events | self.ERROR)
def update_handler(self, fd, events):
fd, obj = self.split_fd(fd)
self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
fd, obj = self.split_fd(fd)
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except Exception:
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
def set_blocking_signal_threshold(self, seconds, action):
if not hasattr(signal, "setitimer"):
gen_log.error("set_blocking_signal_threshold requires a signal module "
"with the setitimer method")
return
self._blocking_signal_threshold = seconds
if seconds is not None:
signal.signal(signal.SIGALRM,
action if action is not None else signal.SIG_DFL)
def start(self):
if self._running:
raise RuntimeError("IOLoop is already running")
self._setup_logging()
if self._stopped:
self._stopped = False
return
old_current = getattr(IOLoop._current, "instance", None)
IOLoop._current.instance = self
self._thread_ident = thread.get_ident()
self._running = True
# signal.set_wakeup_fd closes a race condition in event loops:
# a signal may arrive at the beginning of select/poll/etc
# before it goes into its interruptible sleep, so the signal
# will be consumed without waking the select. The solution is
# for the (C, synchronous) signal handler to write to a pipe,
# which will then be seen by select.
#
# In python's signal handling semantics, this only matters on the
# main thread (fortunately, set_wakeup_fd only works on the main
# thread and will raise a ValueError otherwise).
#
# If someone has already set a wakeup fd, we don't want to
# disturb it. This is an issue for twisted, which does its
# SIGCHILD processing in response to its own wakeup fd being
# written to. As long as the wakeup fd is registered on the IOLoop,
# the loop will still wake up and everything should work.
old_wakeup_fd = None
if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
# requires python 2.6+, unix. set_wakeup_fd exists but crashes
# the python process on windows.
try:
old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
if old_wakeup_fd != -1:
# Already set, restore previous value. This is a little racy,
# but there's no clean get_wakeup_fd and in real use the
# IOLoop is just started once at the beginning.
signal.set_wakeup_fd(old_wakeup_fd)
old_wakeup_fd = None
except ValueError: # non-main thread
pass
try:
while True:
poll_timeout = _POLL_TIMEOUT
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
with self._callback_lock:
callbacks = self._callbacks
self._callbacks = []
for callback in callbacks:
self._run_callback(callback)
# Closures may be holding on to a lot of memory, so allow
# them to be freed before we go into our poll wait.
callbacks = callback = None
if self._timeouts:
now = self.time()
while self._timeouts:
if self._timeouts[0].callback is None:
# the timeout was cancelled
heapq.heappop(self._timeouts)
self._cancellations -= 1
elif self._timeouts[0].deadline <= now:
timeout = heapq.heappop(self._timeouts)
self._run_callback(timeout.callback)
del timeout
else:
seconds = self._timeouts[0].deadline - now
poll_timeout = min(seconds, poll_timeout)
break
if (self._cancellations > 512
and self._cancellations > (len(self._timeouts) >> 1)):
# Clean up the timeout queue when it gets large and it's
# more than half cancellations.
self._cancellations = 0
self._timeouts = [x for x in self._timeouts
if x.callback is not None]
heapq.heapify(self._timeouts)
if self._callbacks:
# If any callbacks or timeouts called add_callback,
# we don't want to wait in poll() before we run them.
poll_timeout = 0.0
if not self._running:
break
if self._blocking_signal_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception as e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if errno_from_exception(e) == errno.EINTR:
continue
else:
raise
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_signal_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that update self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
fd_obj, handler_func = self._handlers[fd]
handler_func(fd_obj, events)
except (OSError, IOError) as e:
if errno_from_exception(e) == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
self.handle_callback_exception(self._handlers.get(fd))
except Exception:
self.handle_callback_exception(self._handlers.get(fd))
fd_obj = handler_func = None
finally:
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
IOLoop._current.instance = old_current
if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd)
def stop(self):
self._running = False
self._stopped = True
self._waker.wake()
def time(self):
return self.time_func()
def add_timeout(self, deadline, callback):
timeout = _Timeout(deadline, stack_context.wrap(callback), self)
heapq.heappush(self._timeouts, timeout)
return timeout
def remove_timeout(self, timeout):
# Removing from a heap is complicated, so just leave the defunct
# timeout object in the queue (see discussion in
# http://docs.python.org/library/heapq.html).
# If this turns out to be a problem, we could add a garbage
# collection pass whenever there are too many dead timeouts.
timeout.callback = None
self._cancellations += 1
def add_callback(self, callback, *args, **kwargs):
with self._callback_lock:
if self._closing:
raise RuntimeError("IOLoop is closing")
list_empty = not self._callbacks
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
if list_empty and thread.get_ident() != self._thread_ident:
# If we're in the IOLoop's thread, we know it's not currently
# polling. If we're not, and we added the first callback to an
# empty list, we may need to wake it up (it may wake up on its
# own, but an occasional extra wake is harmless). Waking
# up a polling IOLoop is relatively expensive, so we try to
# avoid it when we can.
self._waker.wake()
def add_callback_from_signal(self, callback, *args, **kwargs):
with stack_context.NullContext():
if thread.get_ident() != self._thread_ident:
# if the signal is handled on another thread, we can add
# it normally (modulo the NullContext)
self.add_callback(callback, *args, **kwargs)
else:
# If we're on the IOLoop's thread, we cannot use
# the regular add_callback because it may deadlock on
# _callback_lock. Blindly insert into self._callbacks.
# This is safe because the GIL makes list.append atomic.
# One subtlety is that if the signal interrupted the
# _callback_lock block in IOLoop.start, we may modify
# either the old or new version of self._callbacks,
# but either way will work.
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback', 'tiebreaker']
def __init__(self, deadline, callback, io_loop):
if isinstance(deadline, numbers.Real):
self.deadline = deadline
elif isinstance(deadline, datetime.timedelta):
now = io_loop.time()
try:
self.deadline = now + deadline.total_seconds()
except AttributeError: # py2.6
self.deadline = now + _Timeout.timedelta_to_seconds(deadline)
else:
raise TypeError("Unsupported deadline %r" % deadline)
self.callback = callback
self.tiebreaker = next(io_loop._timeout_counter)
@staticmethod
def timedelta_to_seconds(td):
"""Equivalent to td.total_seconds() (introduced in python 2.7)."""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
# Comparison methods to sort by deadline, with object id as a tiebreaker
# to guarantee a consistent ordering. The heapq module uses __le__
# in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
# use __lt__).
def __lt__(self, other):
return ((self.deadline, self.tiebreaker) <
(other.deadline, other.tiebreaker))
def __le__(self, other):
return ((self.deadline, self.tiebreaker) <=
(other.deadline, other.tiebreaker))
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds.
`start` must be called after the `PeriodicCallback` is created.
"""
def __init__(self, callback, callback_time, io_loop=None):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self.io_loop = io_loop or IOLoop.current()
self._running = False
self._timeout = None
def start(self):
"""Starts the timer."""
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def _run(self):
if not self._running:
return
try:
self.callback()
except Exception:
self.io_loop.handle_callback_exception(self.callback)
self._schedule_next()
def _schedule_next(self):
if self._running:
current_time = self.io_loop.time()
while self._next_timeout <= current_time:
self._next_timeout += self.callback_time / 1000.0
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
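# Illustrative usage sketch (not part of the original module): schedules a callback
# every 500 ms on the singleton IOLoop. The callback body is a placeholder.
#
#     def heartbeat():
#         print("tick")
#
#     PeriodicCallback(heartbeat, 500).start()
#     IOLoop.instance().start()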
|
the-stack_0_13189 | import cv2
import numpy as np
from pynput.keyboard import Key, Controller
import time
cap = cv2.VideoCapture(0)
facecascade = cv2.CascadeClassifier(r'C:\Users\yashd\Documents\Python\cv2_Face\haarcascade_frontalface_default.xml')
keyboard = Controller()
time.sleep(7)
while True:
ret,img = cap.read()
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces = facecascade.detectMultiScale(gray,1.3,5)
cv2.line(img, (200, 0), (200, 700), (0, 255, 0), 2)
cv2.line(img, (410, 0), (410, 700), (0, 255, 0), 2)
cv2.line(img, (0, 200), (700, 200), (0, 255, 0), 2)
cv2.putText(img, "Copyright: Yash", (340, 470), cv2.QT_FONT_NORMAL, 1, (255, 0, 0), 2)
#keyboard.press('p')
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
cv2.putText(img,"Active",(30,30),cv2.QT_FONT_NORMAL,1,(0,0,255),2)
print(x,y,x+w,y+h)
#keyboard.release(('p'))
if x<100:
print("Right Arrow Key")
keyboard.press(Key.right)
if x>400:
print("Left Arrow Key")
keyboard.press(Key.left)
if y<190:
print("Up Arrow Key")
keyboard.press(Key.up)
cv2.imshow("Image",img)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows() |
the-stack_0_13190 | """
Wrapper for AWS Lambda.
"""
from __future__ import absolute_import
import traceback
import time
import functools
import warnings
from uuid import uuid4
import epsagon.trace
import epsagon.runners.aws_lambda
import epsagon.triggers.aws_lambda
import epsagon.wrappers.python_function
import epsagon.runners.python_function
from epsagon.common import EpsagonWarning
from .. import constants
STEP_DICT_NAME = 'Epsagon'
def lambda_wrapper(func):
"""Epsagon's Lambda wrapper."""
@functools.wraps(func)
def _lambda_wrapper(*args, **kwargs):
epsagon.trace.tracer.prepare()
try:
event, context = args
except ValueError:
# This can happen when someone manually calls handler without
# parameters / sends kwargs. In such case we ignore this trace.
return func(*args, **kwargs)
try:
runner = epsagon.runners.aws_lambda.LambdaRunner(
time.time(),
context
)
# pylint: disable=W0703
except Exception as exception:
# Regress to python runner.
warnings.warn(
'Lambda context is invalid, using simple python wrapper',
EpsagonWarning
)
epsagon.trace.tracer.add_exception(
exception,
traceback.format_exc()
)
return epsagon.wrappers.python_function.wrap_python_function(
func,
args,
kwargs
)
constants.COLD_START = False
try:
epsagon.trace.tracer.add_event(
epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory(
time.time(),
event,
context
)
)
# pylint: disable=W0703
except Exception as exception:
epsagon.trace.tracer.add_exception(
exception,
traceback.format_exc(),
additional_data={'event': event}
)
try:
result = func(*args, **kwargs)
return result
except Exception as exception:
runner.set_exception(exception, traceback.format_exc())
raise
finally:
epsagon.trace.tracer.add_event(runner)
epsagon.trace.tracer.send_traces()
return _lambda_wrapper
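# Illustrative usage sketch (not part of the original module): the decorator wraps a
# regular AWS Lambda handler; the handler name and return value are placeholders.
#
#     @lambda_wrapper
#     def handler(event, context):
#         return {'statusCode': 200}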
def step_lambda_wrapper(func):
"""Epsagon's Step Lambda wrapper."""
@functools.wraps(func)
def _lambda_wrapper(*args, **kwargs):
epsagon.trace.tracer.prepare()
event, context = args
try:
runner = epsagon.runners.aws_lambda.StepLambdaRunner(
time.time(),
context
)
# pylint: disable=W0703
except Exception as exception:
# Regress to python runner.
warnings.warn(
'Lambda context is invalid, using simple python wrapper',
EpsagonWarning
)
epsagon.trace.tracer.add_exception(
exception,
traceback.format_exc()
)
return epsagon.wrappers.python_function.wrap_python_function(
func,
args,
kwargs
)
constants.COLD_START = False
try:
epsagon.trace.tracer.add_event(
epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory(
time.time(),
event,
context
)
)
# pylint: disable=W0703
except Exception as exception:
epsagon.trace.tracer.add_exception(
exception,
traceback.format_exc()
)
try:
result = func(*args, **kwargs)
# Add step functions data only if the result is a dictionary.
if isinstance(result, dict):
# If the step functions data is not present, then this is the
# First step.
if STEP_DICT_NAME not in event:
steps_dict = {'id': str(uuid4()), 'step_num': 0}
# Otherwise, just advance the steps number by one.
else:
steps_dict = event[STEP_DICT_NAME]
steps_dict['step_num'] += 1
result[STEP_DICT_NAME] = steps_dict
runner.add_step_data(steps_dict)
return result
except Exception as exception:
runner.set_exception(exception, traceback.format_exc())
raise
finally:
epsagon.trace.tracer.add_event(runner)
epsagon.trace.tracer.send_traces()
return _lambda_wrapper
|
the-stack_0_13192 | # cellfreesim functions.py
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator  # used by plotFormat's minor-tick branch
import tellurium as te # 2.1.5
import numpy as np
import sys
import progressbar as progressbar
import emcee
from scipy.integrate import odeint
#-----------------------------------------------------------------------------
# Define parsing functions
#-----------------------------------------------------------------------------
def parseODEs(r,odes):
# Parsing of ODEs into cython code
# Split odes into channels and derivatives (normally these separated by two spaces)
parts = odes.split('\n\n')
channels = parts[0].lstrip('\n').split('\n')
derivs = parts[1].rstrip('\n').split('\n')
channeldict = {}
for channel in channels:
channeldict[channel.split(' = ')[0]] = channel.split(' = ')[1]
derivdict = {}
for deriv in derivs:
derivdict[deriv.split(' = ')[0]] = deriv.split(' = ')[1]
print(derivdict)
print(channeldict)
speciesIds = []
derivatives = []
for derivkey in derivdict.keys():
speciesIds.append(derivkey[1:-3]) # Hardcoded d/dt
channelkey = derivdict[derivkey]
if channelkey[0]=='-':
derivatives.append('-'+channeldict[channelkey[1:]])
else:
derivatives.append(channeldict[channelkey])
speciesValues = r.getFloatingSpeciesConcentrations()
parameterIds = r.getGlobalParameterIds()
parameterValues = [value for value in r.getGlobalParameterValues()]
return(speciesIds, speciesValues, parameterIds, parameterValues, derivatives)
def writeCython(speciesIds,speciesValues,parameterIds,parameterValues,derivatives,OUTPATH,FILENAME):
# Generate cython script
with open(OUTPATH+FILENAME,'w') as file:
file.writelines('# Cythonized ODEs from antimony file\n\n')
# Imports
file.writelines('import numpy as np\n')
file.writelines('cimport numpy as np\n')
file.writelines('cimport cython\n')
file.writelines('from libc.math cimport exp\n')
file.writelines('from libc.math cimport sqrt\n')
file.writelines('from libc.math cimport pow\n\n')
# Model definition
file.writelines('@cython.cdivision(True) # Zero-division checking turned off\n')
file.writelines('@cython.boundscheck(False) # Bounds checking turned off for this function\n')
file.writelines('@cython.wraparound(False) # turn off negative index wrapping for entire function\n')
file.writelines('def model(np.ndarray[np.float64_t,ndim=1] y, double t, np.ndarray[np.float64_t,ndim=1] params):\n\n')
# Species
for i in range(len(speciesIds)):
file.write('\tcdef double '+speciesIds[i]+' = y['+str(i)+']\n')
file.writelines('\n')
for i in range(len(parameterIds)):
file.write('\tcdef double '+parameterIds[i]+' = params['+str(i)+']\n')
file.writelines('\n')
file.writelines('\tcdef double derivs['+str(len(derivatives))+']\n')
file.writelines('\n')
file.writelines('\tderivs = [\n')
for i in range(len(derivatives)-1):
file.write('\t'+derivatives[i]+',\n')
file.write('\t'+derivatives[len(derivatives)-1]+']\n')
file.write('\treturn derivs\n')
file.close()
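# Illustrative usage sketch (not part of the original module): parseODEs expects a
# tellurium/roadrunner model plus an ODE string laid out as a rates block, a blank
# line, then a derivatives block. The getODEsFromModel helper name, the model string,
# and the output paths are assumptions made only for this example.
#
#     r = te.loada(antimony_model_string)
#     odes = te.getODEsFromModel(r)
#     ids, vals, pids, pvals, derivs = parseODEs(r, odes)
#     writeCython(ids, vals, pids, pvals, derivs, './', 'model.pyx')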
#-----------------------------------------------------------------------------
# Define experiment functions
#-----------------------------------------------------------------------------
def chemostatExperiment(chemostatinputs):
# Run chemostat experiment
dilutiontimes = chemostatinputs['dilutiontimes']
y0 = chemostatinputs['y0']
params = chemostatinputs['params']
INTERVAL_IMG = chemostatinputs['interval_img']
DIL_FRAC = chemostatinputs['dil_frac']
INDEX_REFRESH = chemostatinputs['index_refresh']
CONC_REFRESH = chemostatinputs['conc_refresh']
cymodel = chemostatinputs['cymodel']
ndim = y0.shape[0]
# 1. From dilutiontimes, calculate time interval between dilution steps
interval_dil=np.zeros(dilutiontimes.shape[0]-1)
for i in range(dilutiontimes.shape[0]-1):
interval_dil[i] = dilutiontimes[i+1]-dilutiontimes[i]
# 2. Calulate number of steps in each time interval (depends on imaging frequency)
nStepsPerRun = np.zeros(dilutiontimes.shape[0]-1)
for i in range(dilutiontimes.shape[0]-1):
nStepsPerRun[i]=int(interval_dil[i]/INTERVAL_IMG)+1
# 3. Put time intervals together to make total time axis, and initialise output array
timeProgram = {}
timeTotal = np.zeros(1)
for i in range(len(dilutiontimes)-1):
timeProgram[i] = np.linspace(0,interval_dil[i],nStepsPerRun[i])
timeTotal=np.concatenate([timeTotal,timeProgram[i][1:]+timeTotal[-1]],axis=0)
dataout=np.zeros(len(timeTotal)*ndim).reshape(len(timeTotal),ndim)
indStart = int(1)
yTransfer = y0
for i in range(len(dilutiontimes)-1):
psoln = odeint(cymodel, yTransfer, timeProgram[i], args=(params,),mxstep=5000000) # scipy-Fortran RK4 solver
indStop= indStart+int(nStepsPerRun[i]-1)
dataout[indStart:indStop,:]=psoln[1:]
indStart = indStop
# Dilute everything and refresh appropriate species
yTransfer = psoln[-1,:]*(1-DIL_FRAC)
j=0
for ind in INDEX_REFRESH:
yTransfer[ind] = yTransfer[ind]+DIL_FRAC*CONC_REFRESH[j]
j+=1
dataout[0,:] = y0
return(timeTotal, dataout)
#-----------------------------------------------------------------------------
# Define MCMC functions
#-----------------------------------------------------------------------------
def normalprior(param,mu,sigma):
'''Log of the normal prior'''
return np.log( 1.0 / (np.sqrt(2*np.pi)*sigma) ) - 0.5*(param - mu)**2/sigma**2
def lnlike(theta, chemostatinputs, mcmc_inputs):
''' Log likelihood, the function to maximise'''
lnparams = [j for j in theta]
# This has to be an array if more than one parameter, otherwise just a float
paramstmp = chemostatinputs['params']
for i in range(len(mcmc_inputs['paramchannels'])):
paramstmp[int(mcmc_inputs['paramchannels'][i])] = np.exp(lnparams[i])
chemostatinputs['params'] = paramstmp
timeTotal,sim_dataout = chemostatExperiment(chemostatinputs)
X0s = []
for j in range(len(mcmc_inputs['datachannels'])):
y_obs = mcmc_inputs['data'][j]
y_model = sim_dataout[:,int(mcmc_inputs['datachannels'][j])]
INVS2 = 1/mcmc_inputs['yerr']**2
X0=-0.5*(np.sum((y_obs-y_model)**2*INVS2+np.log(2*np.pi*1/INVS2)))
X0s.append(X0)
return sum(X0s)
def lnprior(theta, mcmc_inputs):
''' Log priors'''
lnparams = [j for j in theta]
priorMus = mcmc_inputs['priorMuSigma'][0]
priorSigmas = mcmc_inputs['priorMuSigma'][1]
log_PRs = []
for j in range(len(lnparams)):
log_PRs.append(normalprior(lnparams[j],priorMus[j],priorSigmas[j]))
return np.sum(log_PRs)
def lnprob(theta,chemostatinputs, mcmc_inputs):
''' Log posterior'''
lp = lnprior(theta, mcmc_inputs)
if not np.isfinite(lp):
return -np.inf
# How to properly account for NaNs turning up in lnlike?
# This is NOT the way to do it:
if np.isnan(lp + lnlike(theta,chemostatinputs, mcmc_inputs)):
return -np.inf
else:
return lp + lnlike(theta,chemostatinputs, mcmc_inputs)
def gelman_rubin(chain):
    ''' Gelman-Rubin diagnostic computed across walkers for each parameter; expects a chain of shape (nwalkers, nsteps, ndim). Values should tend to 1. '''
ssq=np.var(chain,axis=1,ddof=1)
W=np.mean(ssq,axis=0)
Tb=np.mean(chain,axis=1)
Tbb=np.mean(Tb,axis=0)
m=chain.shape[0]*1.0
n=chain.shape[1]*1.0
B=n/(m-1)*np.sum((Tbb-Tb)**2,axis=0)
varT=(n-1)/n*W+1/n*B
Rhat=np.sqrt(varT/W)
return Rhat
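# Illustrative usage sketch (not part of the original module): gelman_rubin takes the
# raw emcee chain with shape (nwalkers, nsteps, ndim) and returns one R-hat value per
# parameter. `sampler` here is a placeholder for an EnsembleSampler that has been run.
#
#     rhat = gelman_rubin(sampler.chain)
#     print(rhat)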
def runMCMC(speciesValues,parameterValues,TMAX,INTERVAL_DIL,INTERVAL_IMG,
dilutiontimes,DIL_FRAC,INDEX_REFRESH,CONC_REFRESH,model,
DATACHANNELS,PARAMCHANNELS,PMUSIGMA,SIGMA,
iterations,nwalkers,nDimParams,threads,pos,tburn):
y0 = np.array([float(value) for value in speciesValues])
params = np.array([float(value) for value in parameterValues])
if TMAX%INTERVAL_DIL!=0:
print('\n')
print('TMAX is not divisible by INTERVAL_DIL!\n')
print('Inaccurate results expected!\n')
if INTERVAL_DIL%INTERVAL_IMG!=0:
print('\n')
print('INTERVAL_DIL is not divisible by INTERVAL_IMG!\n')
print('Inaccurate results expected!\n')
cinputkeys = ['dilutiontimes', 'y0', 'params', 'interval_img', 'dil_frac', 'index_refresh', 'conc_refresh','cymodel']
cinputvalues = [dilutiontimes, y0, params, INTERVAL_IMG, DIL_FRAC, INDEX_REFRESH, CONC_REFRESH, model.model]
chemostatinputs = dict(zip(cinputkeys,cinputvalues))
# Generate silico data
timeTotal,dataout = chemostatExperiment(chemostatinputs)
mcmc_inputs = {}
mcmc_inputs['data'] = [dataout[:,channel] for channel in DATACHANNELS]
mcmc_inputs['priorMuSigma'] = PMUSIGMA
mcmc_inputs['yerr'] = SIGMA
mcmc_inputs['datachannels'] = DATACHANNELS
mcmc_inputs['paramchannels'] = PARAMCHANNELS
##### The rest of the code is automatic #####
sampler=emcee.EnsembleSampler(nwalkers,nDimParams,lnprob,a=2,args=([chemostatinputs, mcmc_inputs]),threads=threads)
### Start MCMC
iter=iterations
bar=progressbar.ProgressBar(max_value=iter)
for i, result in enumerate(sampler.sample(pos, iterations=iter)):
bar.update(i)
### Finish MCMC
samples=sampler.chain[:,:,:].reshape((-1,nDimParams)) # shape = (nsteps, nDimParams)
samplesnoburn=sampler.chain[:,tburn:,:].reshape((-1,nDimParams)) # shape = (nsteps, nDimParams)
return(samplesnoburn, chemostatinputs, mcmc_inputs, timeTotal, dataout)
# Plotting
def plotInitialise(figW,figH):
plt.close("all")
figure_options={'figsize':(figW,figH)} # figure size in inches. A4=11.7x8.3, A5=8.3,5.8
font_options={'size':'14','family':'sans-serif','sans-serif':'Arial'}
plt.rc('figure', **figure_options)
plt.rc('font', **font_options)
def plotFormat(ax,xlabel=False,
ylabel=False,
xlim=False,
ylim=False,
title=False,
xticks=False,
yticks=False,
logx=False,
logy=False,
logxy=False,
symlogx=False,
legend=False):
# Set titles and labels
if title!=False:
ax.set_title(title)
if xlabel!=False:
ax.set_xlabel(xlabel, labelpad=12)
if ylabel!=False:
ax.set_ylabel(ylabel, labelpad=12)
# Set axis limits
if xlim!=False:
ax.set_xlim(xlim)
if ylim!=False:
ax.set_ylim(ylim)
# Set tick values
if xticks!=False:
ax.set_xticks(xticks)
if yticks!=False:
ax.set_yticks(yticks)
# Set line thicknesses
#ax.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter("%1.e"))
#ax.axhline(linewidth=2, color='k')
#ax.axvline(linewidth=2, color='k')
ax.spines['bottom'].set_linewidth(2)
ax.spines['top'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
ax.spines['right'].set_linewidth(2)
# Set ticks
if logx==True:
ax.set_xscale("log")
elif logy==True:
ax.set_yscale("log")
elif logxy==True:
ax.set_xscale("log")
ax.set_yscale("log")
elif symlogx==True:
ax.set_xscale("symlog",linthreshx=1e-4)
ax.set_yscale("log")
else:
minorLocatorx=AutoMinorLocator(2) # Number of minor intervals per major interval
minorLocatory=AutoMinorLocator(2)
ax.xaxis.set_minor_locator(minorLocatorx)
ax.yaxis.set_minor_locator(minorLocatory)
ax.tick_params(which='major', width=2, length=8, pad=9,direction='in',top='on',right='on')
ax.tick_params(which='minor', width=2, length=4, pad=9,direction='in',top='on',right='on')
if legend==True:
ax.legend(loc='upper right', fontsize=14,numpoints=1) ### Default 'best'
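# Illustrative usage sketch (not part of the original module): the data, labels, and
# output file name are placeholders chosen only for this example.
if __name__ == "__main__":
    plotInitialise(8.3, 5.8)
    fig, ax = plt.subplots()
    ax.plot([1, 2, 3], [1.0, 10.0, 100.0], label='demo')
    plotFormat(ax, xlabel='Time (h)', ylabel='Concentration', logy=True, legend=True)
    plt.savefig('demo_plot.png')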
|
the-stack_0_13195 | import torch
import torch.nn as nn
import torch.nn.functional as F
class SimpleNNModel(nn.Module):
def __init__(self):
super(SimpleNNModel, self).__init__()
self.layer1 = nn.Linear(32*32, 512)
self.layer2 = nn.Linear(512, 32)
self.layer3 = nn.Linear(32, 10)
self.loss_fn = nn.CrossEntropyLoss()
self.optimizer = torch.optim.Adam(self.parameters(), lr=0.001)
def forward(self, inputs):
batch_size = inputs.shape[0]
# Convert images to grayscale
x = (inputs[:, 0, :, :] + inputs[:, 1, :, :] + inputs[:, 2, :, :])/3
# Flatten the image
x = x.view(batch_size, -1)
h = F.relu(self.layer1(x))
h = F.relu(self.layer2(h))
out = F.softmax(self.layer3(h), dim=1)
return out
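# Illustrative usage sketch (not part of the original module): runs a randomly
# initialised model on one dummy batch of CIFAR-sized images.
if __name__ == "__main__":
    model = SimpleNNModel()
    dummy_batch = torch.randn(4, 3, 32, 32)  # (batch, channels, height, width)
    probs = model(dummy_batch)
    print(probs.shape)  # torch.Size([4, 10])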
|
the-stack_0_13197 | #!/usr/bin/env python3
from __future__ import print_function
# Get all of the videos for the shows.
PBSKIDS_SHOWS = "http://pbskids.org/pbsk/video/api/getShows/"
PBSKIDS_VIDS = "http://pbskids.org/pbsk/video/api/getVideos/"
VIDEOS_CACHE = "videos.json"
# Find the shows. Write to shows.json.
import requests
import json
# Create list of all videos
all_videos = list()
# Start index
start_index = 1
# To bootstrap the while loop.
total_videos = start_index + 1
# While our start index is less than the total number of videos
while start_index < total_videos:
# Only get full episodes. Can be of type 'Episode' or 'Clip'.
resp = requests.get(PBSKIDS_VIDS, params={'type': 'Episode',
'status': 'available',
'startindex': start_index} )
video_list = json.loads(resp.text)
# These should always be the same since we are requesting the startindex
if video_list["start"] != start_index:
        raise RuntimeError("Returned start index doesn't match requested @ startIdx={}".format(start_index))
# Get total number of videos.
total_videos = video_list["matched"]
print("Grabbing video data: {}-{} of {}".format(video_list["start"],
video_list["end"],
video_list["matched"]))
start_index = video_list["end"] + 1
for item in video_list["items"]:
all_videos.append(item)
# Write to cache.
with open(VIDEOS_CACHE, 'w') as outfile:
json.dump(all_videos, outfile)
# Reload from the file, just to be sure.
with open(VIDEOS_CACHE, 'r') as infile:
all_videos2 = json.load(infile)
assert(all_videos == all_videos2)
print("Writing Cache: "+VIDEOS_CACHE) |
the-stack_0_13200 | # qubit number=4
# total number=41
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
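# Hedged sketch (added for illustration only): bitwise_xor returns the XOR-ed string
# reversed, while bitwise_dot returns the parity of the element-wise product.
def _bitstring_demo():
    return bitwise_xor("110", "011"), bitwise_dot("110", "011")  # ("101", "1")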
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
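# Hedged usage sketch (added for illustration; not part of the generated circuit).
# build_oracle flips the target qubit exactly on the basis states where f(rep) == "1".
def _oracle_demo():
    demo_oracle = build_oracle(2, lambda rep: "1" if rep == "11" else "0")
    return demo_oracle.size()  # number of elementary operations in the oracle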
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=13
prog.h(input_qubit[3]) # number=23
prog.cz(input_qubit[0],input_qubit[3]) # number=24
prog.y(input_qubit[1]) # number=37
prog.h(input_qubit[3]) # number=25
prog.x(input_qubit[3]) # number=18
prog.cx(input_qubit[0],input_qubit[3]) # number=19
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=32
prog.cx(input_qubit[3],input_qubit[0]) # number=20
prog.h(input_qubit[0]) # number=38
prog.cz(input_qubit[3],input_qubit[0]) # number=39
prog.h(input_qubit[0]) # number=40
prog.z(input_qubit[3]) # number=27
prog.h(input_qubit[0]) # number=29
prog.cz(input_qubit[3],input_qubit[0]) # number=30
prog.h(input_qubit[0]) # number=31
prog.h(input_qubit[0]) # number=33
prog.cz(input_qubit[3],input_qubit[0]) # number=34
prog.h(input_qubit[0]) # number=35
prog.h(input_qubit[2]) # number=36
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot = 8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC2805.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
the-stack_0_13201 | import os
import platform
import sys
from methods import get_compiler_version, using_gcc, using_clang
def is_active():
return True
def get_name():
return "X11"
def can_build():
if os.name != "posix" or sys.platform == "darwin":
return False
# Check the minimal dependencies
x11_error = os.system("pkg-config --version > /dev/null")
if x11_error:
return False
x11_error = os.system("pkg-config x11 --modversion > /dev/null ")
if x11_error:
return False
x11_error = os.system("pkg-config xcursor --modversion > /dev/null ")
if x11_error:
print("xcursor not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xinerama --modversion > /dev/null ")
if x11_error:
print("xinerama not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xext --modversion > /dev/null ")
if x11_error:
print("xext not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xrandr --modversion > /dev/null ")
if x11_error:
print("xrandr not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xrender --modversion > /dev/null ")
if x11_error:
print("xrender not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xi --modversion > /dev/null ")
if x11_error:
print("xi not found.. Aborting.")
return False
return True
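# Hedged sketch (not part of the upstream script): every probe above follows the
# same pkg-config pattern, so it could be expressed with a small helper like this.
def _has_pkg(pkg):
    # True when `pkg-config <pkg> --modversion` exits with status 0.
    return os.system("pkg-config %s --modversion > /dev/null" % pkg) == 0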
def get_opts():
from SCons.Variables import BoolVariable, EnumVariable
return [
BoolVariable("use_llvm", "Use the LLVM compiler", False),
BoolVariable("use_lld", "Use the LLD linker", False),
BoolVariable("use_thinlto", "Use ThinLTO", False),
BoolVariable("use_static_cpp", "Link libgcc and libstdc++ statically for better portability", True),
BoolVariable("use_ubsan", "Use LLVM/GCC compiler undefined behavior sanitizer (UBSAN)", False),
BoolVariable("use_asan", "Use LLVM/GCC compiler address sanitizer (ASAN))", False),
BoolVariable("use_lsan", "Use LLVM/GCC compiler leak sanitizer (LSAN))", False),
BoolVariable("use_tsan", "Use LLVM/GCC compiler thread sanitizer (TSAN))", False),
BoolVariable("use_msan", "Use LLVM/GCC compiler memory sanitizer (MSAN))", False),
BoolVariable("pulseaudio", "Detect and use PulseAudio", True),
BoolVariable("udev", "Use udev for gamepad connection callbacks", True),
BoolVariable("debug_symbols", "Add debugging symbols to release/release_debug builds", True),
BoolVariable("separate_debug_symbols", "Create a separate file containing debugging symbols", False),
BoolVariable("touch", "Enable touch events", True),
BoolVariable("execinfo", "Use libexecinfo on systems where glibc is not available", False),
]
def get_flags():
return []
def configure(env):
## Build type
if env["target"] == "release":
if env["optimize"] == "speed": # optimize for speed (default)
env.Prepend(CCFLAGS=["-O3"])
elif env["optimize"] == "size": # optimize for size
env.Prepend(CCFLAGS=["-Os"])
if env["debug_symbols"]:
env.Prepend(CCFLAGS=["-g2"])
elif env["target"] == "release_debug":
if env["optimize"] == "speed": # optimize for speed (default)
env.Prepend(CCFLAGS=["-O2"])
elif env["optimize"] == "size": # optimize for size
env.Prepend(CCFLAGS=["-Os"])
env.Prepend(CPPDEFINES=["DEBUG_ENABLED"])
if env["debug_symbols"]:
env.Prepend(CCFLAGS=["-g2"])
elif env["target"] == "debug":
env.Prepend(CCFLAGS=["-ggdb"])
env.Prepend(CCFLAGS=["-g3"])
env.Prepend(CPPDEFINES=["DEBUG_ENABLED"])
env.Append(LINKFLAGS=["-rdynamic"])
## Architecture
is64 = sys.maxsize > 2 ** 32
if env["bits"] == "default":
env["bits"] = "64" if is64 else "32"
## Compiler configuration
if "CXX" in env and "clang" in os.path.basename(env["CXX"]):
# Convenience check to enforce the use_llvm overrides when CXX is clang(++)
env["use_llvm"] = True
if env["use_llvm"]:
if "clang++" not in os.path.basename(env["CXX"]):
env["CC"] = "clang"
env["CXX"] = "clang++"
env.extra_suffix = ".llvm" + env.extra_suffix
if env["use_lld"]:
if env["use_llvm"]:
env.Append(LINKFLAGS=["-fuse-ld=lld"])
if env["use_thinlto"]:
# A convenience so you don't need to write use_lto too when using SCons
env["use_lto"] = True
else:
print("Using LLD with GCC is not supported yet, try compiling with 'use_llvm=yes'.")
sys.exit(255)
if env["use_ubsan"] or env["use_asan"] or env["use_lsan"] or env["use_tsan"] or env["use_msan"]:
env.extra_suffix += "s"
if env["use_ubsan"]:
env.Append(
CCFLAGS=[
"-fsanitize=undefined,shift,shift-exponent,integer-divide-by-zero,unreachable,vla-bound,null,return,signed-integer-overflow,bounds,float-divide-by-zero,float-cast-overflow,nonnull-attribute,returns-nonnull-attribute,bool,enum,vptr,pointer-overflow,builtin"
]
)
if env["use_llvm"]:
env.Append(
CCFLAGS=[
"-fsanitize=nullability-return,nullability-arg,function,nullability-assign,implicit-integer-sign-change,implicit-signed-integer-truncation,implicit-unsigned-integer-truncation"
]
)
else:
env.Append(CCFLAGS=["-fsanitize=bounds-strict"])
env.Append(LINKFLAGS=["-fsanitize=undefined"])
if env["use_asan"]:
env.Append(CCFLAGS=["-fsanitize=address,pointer-subtract,pointer-compare"])
env.Append(LINKFLAGS=["-fsanitize=address"])
if env["use_lsan"]:
env.Append(CCFLAGS=["-fsanitize=leak"])
env.Append(LINKFLAGS=["-fsanitize=leak"])
if env["use_tsan"]:
env.Append(CCFLAGS=["-fsanitize=thread"])
env.Append(LINKFLAGS=["-fsanitize=thread"])
if env["use_msan"]:
env.Append(CCFLAGS=["-fsanitize=memory"])
env.Append(LINKFLAGS=["-fsanitize=memory"])
if env["use_lto"]:
if not env["use_llvm"] and env.GetOption("num_jobs") > 1:
env.Append(CCFLAGS=["-flto"])
env.Append(LINKFLAGS=["-flto=" + str(env.GetOption("num_jobs"))])
else:
if env["use_lld"] and env["use_thinlto"]:
env.Append(CCFLAGS=["-flto=thin"])
env.Append(LINKFLAGS=["-flto=thin"])
else:
env.Append(CCFLAGS=["-flto"])
env.Append(LINKFLAGS=["-flto"])
if not env["use_llvm"]:
env["RANLIB"] = "gcc-ranlib"
env["AR"] = "gcc-ar"
env.Append(CCFLAGS=["-pipe"])
env.Append(LINKFLAGS=["-pipe"])
# Check for gcc version >= 6 before adding -no-pie
version = get_compiler_version(env) or [-1, -1]
if using_gcc(env):
if version[0] >= 6:
env.Append(CCFLAGS=["-fpie"])
env.Append(LINKFLAGS=["-no-pie"])
    # Do the same for Clang; should be fine with Clang 4 and higher
if using_clang(env):
if version[0] >= 4:
env.Append(CCFLAGS=["-fpie"])
env.Append(LINKFLAGS=["-no-pie"])
## Dependencies
env.ParseConfig("pkg-config x11 --cflags --libs")
env.ParseConfig("pkg-config xcursor --cflags --libs")
env.ParseConfig("pkg-config xinerama --cflags --libs")
env.ParseConfig("pkg-config xext --cflags --libs")
env.ParseConfig("pkg-config xrandr --cflags --libs")
env.ParseConfig("pkg-config xrender --cflags --libs")
env.ParseConfig("pkg-config xi --cflags --libs")
if env["touch"]:
env.Append(CPPDEFINES=["TOUCH_ENABLED"])
# FIXME: Check for existence of the libs before parsing their flags with pkg-config
# freetype depends on libpng and zlib, so bundling one of them while keeping others
# as shared libraries leads to weird issues
if env["builtin_freetype"] or env["builtin_libpng"] or env["builtin_zlib"]:
env["builtin_freetype"] = True
env["builtin_libpng"] = True
env["builtin_zlib"] = True
if not env["builtin_freetype"]:
env.ParseConfig("pkg-config freetype2 --cflags --libs")
if not env["builtin_libpng"]:
env.ParseConfig("pkg-config libpng16 --cflags --libs")
if not env["builtin_bullet"]:
# We need at least version 2.89
import subprocess
        bullet_version = subprocess.check_output(["pkg-config", "bullet", "--modversion"]).strip().decode("utf-8")
        # Compare numerically; a plain string comparison misorders versions such as "2.100" vs "2.89".
        if [int(x) for x in bullet_version.split(".")[:2]] < [2, 89]:
# Abort as system bullet was requested but too old
print(
"Bullet: System version {0} does not match minimal requirements ({1}). Aborting.".format(
bullet_version, "2.89"
)
)
sys.exit(255)
env.ParseConfig("pkg-config bullet --cflags --libs")
if False: # not env['builtin_assimp']:
# FIXME: Add min version check
env.ParseConfig("pkg-config assimp --cflags --libs")
if not env["builtin_enet"]:
env.ParseConfig("pkg-config libenet --cflags --libs")
if not env["builtin_squish"]:
env.ParseConfig("pkg-config libsquish --cflags --libs")
if not env["builtin_zstd"]:
env.ParseConfig("pkg-config libzstd --cflags --libs")
# Sound and video libraries
# Keep the order as it triggers chained dependencies (ogg needed by others, etc.)
if not env["builtin_libtheora"]:
env["builtin_libogg"] = False # Needed to link against system libtheora
env["builtin_libvorbis"] = False # Needed to link against system libtheora
env.ParseConfig("pkg-config theora theoradec --cflags --libs")
else:
list_of_x86 = ["x86_64", "x86", "i386", "i586"]
if any(platform.machine() in s for s in list_of_x86):
env["x86_libtheora_opt_gcc"] = True
if not env["builtin_libvpx"]:
env.ParseConfig("pkg-config vpx --cflags --libs")
if not env["builtin_libvorbis"]:
env["builtin_libogg"] = False # Needed to link against system libvorbis
env.ParseConfig("pkg-config vorbis vorbisfile --cflags --libs")
if not env["builtin_opus"]:
env["builtin_libogg"] = False # Needed to link against system opus
env.ParseConfig("pkg-config opus opusfile --cflags --libs")
if not env["builtin_libogg"]:
env.ParseConfig("pkg-config ogg --cflags --libs")
if not env["builtin_libwebp"]:
env.ParseConfig("pkg-config libwebp --cflags --libs")
if not env["builtin_mbedtls"]:
# mbedTLS does not provide a pkgconfig config yet. See https://github.com/ARMmbed/mbedtls/issues/228
env.Append(LIBS=["mbedtls", "mbedcrypto", "mbedx509"])
if not env["builtin_wslay"]:
env.ParseConfig("pkg-config libwslay --cflags --libs")
if not env["builtin_miniupnpc"]:
# No pkgconfig file so far, hardcode default paths.
env.Prepend(CPPPATH=["/usr/include/miniupnpc"])
env.Append(LIBS=["miniupnpc"])
# On Linux wchar_t should be 32-bits
# 16-bit library shouldn't be required due to compiler optimisations
if not env["builtin_pcre2"]:
env.ParseConfig("pkg-config libpcre2-32 --cflags --libs")
# Embree is only compatible with x86_64. Yet another unreliable hack that will break
    # cross-compilation; this will really need to be handled better. Thankfully it only affects
# people who disable builtin_embree (likely distro packagers).
if env["tools"] and not env["builtin_embree"] and (is64 and platform.machine() == "x86_64"):
# No pkgconfig file so far, hardcode expected lib name.
env.Append(LIBS=["embree3"])
## Flags
if os.system("pkg-config --exists alsa") == 0: # 0 means found
print("Enabling ALSA")
env["alsa"] = True
env.Append(CPPDEFINES=["ALSA_ENABLED", "ALSAMIDI_ENABLED"])
else:
print("ALSA libraries not found, disabling driver")
if env["pulseaudio"]:
if os.system("pkg-config --exists libpulse") == 0: # 0 means found
print("Enabling PulseAudio")
env.Append(CPPDEFINES=["PULSEAUDIO_ENABLED"])
env.ParseConfig("pkg-config --cflags libpulse")
else:
print("PulseAudio development libraries not found, disabling driver")
if platform.system() == "Linux":
env.Append(CPPDEFINES=["JOYDEV_ENABLED"])
if env["udev"]:
if os.system("pkg-config --exists libudev") == 0: # 0 means found
print("Enabling udev support")
env.Append(CPPDEFINES=["UDEV_ENABLED"])
else:
print("libudev development libraries not found, disabling udev support")
else:
env["udev"] = False # Linux specific
# Linkflags below this line should typically stay the last ones
if not env["builtin_zlib"]:
env.ParseConfig("pkg-config zlib --cflags --libs")
env.Prepend(CPPPATH=["#platform/x11"])
env.Append(CPPDEFINES=["X11_ENABLED", "UNIX_ENABLED", "OPENGL_ENABLED", "GLES_ENABLED"])
env.Append(LIBS=["GL", "pthread"])
if platform.system() == "Linux":
env.Append(LIBS=["dl"])
if platform.system().find("BSD") >= 0:
env["execinfo"] = True
if env["execinfo"]:
env.Append(LIBS=["execinfo"])
if not env["tools"]:
import subprocess
import re
linker_version_str = subprocess.check_output([env.subst(env["LINK"]), "-Wl,--version"]).decode("utf-8")
        gnu_ld_version = re.search(r"^GNU ld [^$]*(\d+\.\d+)$", linker_version_str, re.MULTILINE)
if not gnu_ld_version:
print(
"Warning: Creating template binaries enabled for PCK embedding is currently only supported with GNU ld"
)
else:
if float(gnu_ld_version.group(1)) >= 2.30:
env.Append(LINKFLAGS=["-T", "platform/x11/pck_embed.ld"])
else:
env.Append(LINKFLAGS=["-T", "platform/x11/pck_embed.legacy.ld"])
## Cross-compilation
if is64 and env["bits"] == "32":
env.Append(CCFLAGS=["-m32"])
env.Append(LINKFLAGS=["-m32", "-L/usr/lib/i386-linux-gnu"])
elif not is64 and env["bits"] == "64":
env.Append(CCFLAGS=["-m64"])
env.Append(LINKFLAGS=["-m64", "-L/usr/lib/i686-linux-gnu"])
# Link those statically for portability
if env["use_static_cpp"]:
# Workaround for GH-31743, Ubuntu 18.04 i386 crashes when it's used.
# That doesn't make any sense but it's likely a Ubuntu bug?
if is64 or env["bits"] == "64":
env.Append(LINKFLAGS=["-static-libgcc", "-static-libstdc++"])
if env["use_llvm"]:
env["LINKCOM"] = env["LINKCOM"] + " -l:libatomic.a"
else:
if env["use_llvm"]:
env.Append(LIBS=["atomic"])
|
the-stack_0_13202 | class News:
'''
News class to define News Objects
'''
def __init__(self, name, description, url, urlToImage, content):
self.name = name
self.description = description
self.url = url
self.urlToImage = urlToImage
self.content = content
class Sources:
'''
Sources class to define Sources objects
'''
def __init__(self, id, name, description, url, category):
self.id = id
self.name = name
self.description = description
self.url = url
self.category = category
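# Hedged usage sketch (added for illustration only): the two classes are plain value
# containers; the identifiers and URLs below are made-up placeholders.
def _models_demo():
    source = Sources('bbc-news', 'BBC News', 'UK public broadcaster',
                     'https://example.com/bbc', 'general')
    article = News(source.name, 'Sample headline', 'https://example.com/story',
                   'https://example.com/story.jpg', 'Body text of the story.')
    return source.category, article.name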
|
the-stack_0_13203 | """
Created on Sat Nov 30 11:12:39 2019
@author: Bogdan
"""
import os
import sys
import numpy as np
from scipy.signal import convolve2d
project_path = os.getcwd()
while os.path.basename(project_path) != 'image-tinkering':
project_path = os.path.dirname(project_path)
sys.path.append(project_path)
from backend import utils
def apply_kernel(image, kernel):
""" Performs convolution between the given image and kernel """
if utils.is_color(image):
result_b = convolve2d(image[:, :, 0], kernel, mode='same', fillvalue=np.median(image[:, :, 0]))
result_g = convolve2d(image[:, :, 1], kernel, mode='same', fillvalue=np.median(image[:, :, 1]))
result_r = convolve2d(image[:, :, 2], kernel, mode='same', fillvalue=np.median(image[:, :, 2]))
channels_list = []
# Trim values lower than 0 or higher than 255 and convert to uint8 for openCV compatibility
for channel in 'bgr':
underflow_mask = locals()['result_' + channel] < 0
result_temp = np.where(underflow_mask, 0, locals()['result_' + channel])
result_temp = np.where(result_temp > 255, 255, result_temp)
result_temp = result_temp.astype(np.uint8)
channels_list.append(result_temp)
filtered_image = utils.merge_channels(channels_list)
else:
# Trim values lower than 0 or higher than 255 and convert to uint8 for openCV compatibility
filtered_image = convolve2d(image, kernel, mode='same')
filtered_image = np.where(filtered_image < 0, 0, filtered_image)
filtered_image = np.where(filtered_image > 255, 255, filtered_image)
filtered_image = filtered_image.astype(np.uint8)
return filtered_image
def generate_box_kernel(size):
""" Generates a kernel having the given size and giving equal weights to all
elements surrounding the current pixel """
return (1 / size ** 2) * np.ones((size, size), dtype=np.uint8)
def generate_gaussian_kernel(size, sigma=3):
""" Generates an one-sum kernel having the given size, containing samples
from a gaussian distribution having the given standard deviation """
size = size // 2
x, y = np.mgrid[-size: size + 1, -size: size + 1]
normalization_factor = 1 / (2.0 * np.pi * sigma**2)
g = np.exp(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2))) * normalization_factor
g = g / (np.sum(g)) # Normalize the kernel so the sum of elements is 1
return g
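# Hedged usage sketch (added for illustration only): the generated Gaussian kernel is
# normalized, so its coefficients sum to 1 and convolving with it preserves brightness.
def _gaussian_kernel_demo():
    kernel = generate_gaussian_kernel(5, sigma=1.5)
    assert abs(float(np.sum(kernel)) - 1.0) < 1e-9
    return kernel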
def get_thresholds(image, method, kernel_size):
""" Performs convolution between the image and the kernel of the specified
size. The resulting values are the thresholds used in binarization """
if method == 'mean':
kernel = generate_box_kernel(kernel_size)
else:
kernel = generate_gaussian_kernel(kernel_size)
thresholds = apply_kernel(image, kernel)
return thresholds
def generate_emboss_kernels(size, direction, kernel_type):
""" Generates the kernels of the specified type (mask or filter), size and
direction, needed for the embossing operation """
kernel1 = np.zeros((size, size), dtype=np.int8)
if direction == 'horizontal':
kernel1[: size // 2, size // 2] = 1
kernel1[size // 2 + 1:, size // 2] = -1
if kernel_type == 'filter':
kernel1[size // 2, size // 2] = 1
kernel2 = np.flipud(kernel1)
elif direction == 'vertical':
kernel1[size // 2, : size // 2] = 1
kernel1[size // 2, size // 2 + 1:] = -1
if kernel_type == 'filter':
kernel1[size // 2, size // 2] = 1
kernel2 = np.fliplr(kernel1)
else:
for i in range(size):
if i < size // 2:
kernel1[i, i] = 1
elif i > size // 2:
kernel1[i, i] = -1
elif kernel_type == 'filter':
kernel1[i, i] = 1
kernel2 = np.flipud(kernel1)
return kernel1, kernel2
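# Hedged sketch (added for illustration only): a 3x3 horizontal emboss mask pair.
# With these arguments kernel1 is [[0, 1, 0], [0, 0, 0], [0, -1, 0]] and kernel2 is
# its vertical flip, so the two masks emboss in opposite directions.
def _emboss_demo():
    k1, k2 = generate_emboss_kernels(3, 'horizontal', 'mask')
    return np.array_equal(k2, np.flipud(k1))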
|
the-stack_0_13205 | """
fs.expose.dokan
===============
Expose an FS object to the native filesystem via Dokan.
This module provides the necessary interfaces to mount an FS object into
the local filesystem using Dokan on win32::
http://dokan-dev.github.io/
For simple usage, the function 'mount' takes an FS object
and new device mount point or an existing empty folder
and exposes the given FS as that path::
>>> from fs.memoryfs import MemoryFS
>>> from fs.expose import dokan
>>> fs = MemoryFS()
>>> # Mount device mount point
>>> mp = dokan.mount(fs, "Q:\\")
>>> mp.path
'Q:\\'
>>> mp.unmount()
>>> fs = MemoryFS()
>>> # Mount in an existing empty folder.
>>> mp = dokan.mount(fs, "C:\\test")
>>> mp.path
'C:\\test'
>>> mp.unmount()
The above spawns a new background process to manage the Dokan event loop, which
can be controlled through the returned subprocess.Popen object. To avoid
spawning a new process, set the 'foreground' option::
>>> # This will block until the filesystem is unmounted
>>> dokan.mount(fs, "Q:\\", foreground=True)
Any additional options for the Dokan process can be passed as keyword arguments
to the 'mount' function.
If you require finer control over the creation of the Dokan process, you can
instantiate the MountProcess class directly. It accepts all options available
to subprocess.Popen::
>>> from subprocess import PIPE
>>> mp = dokan.MountProcess(fs, "Q:\\", stderr=PIPE)
>>> dokan_errors = mp.communicate()[1]
If you are exposing an untrusted filesystem, you may like to apply the
wrapper class Win32SafetyFS before passing it into dokan. This will take
a number of steps to avoid suspicious operations on windows, such as
hiding autorun files.
The binding to Dokan is created via ctypes. Due to the very stable ABI of
win32, this should work without further configuration on just about all
systems with Dokan installed.
"""
# Copyright (c) 2009-2010, Cloud Matrix Pty. Ltd.
# Copyright (c) 2016-2016, Adrien J. <[email protected]>.
# All rights reserved; available under the terms of the MIT License.
from __future__ import with_statement, absolute_import
import six
import sys
import os
import errno
import time
import stat as statinfo
import subprocess
try:
import cPickle as pickle
except ImportError:
import pickle
import datetime
import ctypes
from collections import deque
from six.moves import range
from fs.base import threading
from fs.errors import *
from fs.path import *
from fs.local_functools import wraps
from fs.wrapfs import WrapFS
try:
from . import libdokan
except (NotImplementedError, EnvironmentError, ImportError, NameError,):
is_available = False
sys.modules.pop("fs.expose.dokan.libdokan", None)
libdokan = None
else:
is_available = True
from ctypes.wintypes import LPCWSTR, WCHAR
kernel32 = ctypes.windll.kernel32
import logging
logger = logging.getLogger("fs.expose.dokan")
# Options controlling the behavior of the Dokan filesystem
# Ouput debug message
DOKAN_OPTION_DEBUG = 1
# Ouput debug message to stderr
DOKAN_OPTION_STDERR = 2
# Use alternate stream
DOKAN_OPTION_ALT_STREAM = 4
# Mount drive as write-protected.
DOKAN_OPTION_WRITE_PROTECT = 8
# Use network drive, you need to install Dokan network provider.
DOKAN_OPTION_NETWORK = 16
# Use removable drive
DOKAN_OPTION_REMOVABLE = 32
# Use mount manager
DOKAN_OPTION_MOUNT_MANAGER = 64
# Mount the drive on current session only
DOKAN_OPTION_CURRENT_SESSION = 128
# FileLock in User Mode
DOKAN_OPTION_FILELOCK_USER_MODE = 256
# Error codes returned by DokanMain
DOKAN_SUCCESS = 0
# General Error
DOKAN_ERROR = -1
# Bad Drive letter
DOKAN_DRIVE_LETTER_ERROR = -2
# Can't install driver
DOKAN_DRIVER_INSTALL_ERROR = -3
# Driver something wrong
DOKAN_START_ERROR = -4
# Can't assign a drive letter or mount point
DOKAN_MOUNT_ERROR = -5
# Mount point is invalid
DOKAN_MOUNT_POINT_ERROR = -6
# Requested an incompatible version
DOKAN_VERSION_ERROR = -7
# Misc windows constants
FILE_LIST_DIRECTORY = 0x01
FILE_SHARE_READ = 0x01
FILE_SHARE_WRITE = 0x02
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
FILE_FLAG_OVERLAPPED = 0x40000000
FILE_ATTRIBUTE_ARCHIVE = 32
FILE_ATTRIBUTE_COMPRESSED = 2048
FILE_ATTRIBUTE_DIRECTORY = 16
FILE_ATTRIBUTE_HIDDEN = 2
FILE_ATTRIBUTE_NORMAL = 128
FILE_ATTRIBUTE_OFFLINE = 4096
FILE_ATTRIBUTE_READONLY = 1
FILE_ATTRIBUTE_SYSTEM = 4
FILE_ATTRIBUTE_TEMPORARY = 256  # 0x100
FILE_CREATE = 2
FILE_OPEN = 1
FILE_OPEN_IF = 3
FILE_OVERWRITE = 4
FILE_SUPERSEDE = 0
FILE_OVERWRITE_IF = 5
FILE_GENERIC_READ = 1179785
FILE_GENERIC_WRITE = 1179926
FILE_DELETE_ON_CLOSE = 0x00001000
REQ_GENERIC_READ = 0x80 | 0x08 | 0x01
REQ_GENERIC_WRITE = 0x004 | 0x0100 | 0x002 | 0x0010
STATUS_SUCCESS = 0x0
STATUS_ACCESS_DENIED = 0xC0000022
STATUS_LOCK_NOT_GRANTED = 0xC0000055
STATUS_NOT_SUPPORTED = 0xC00000BB
STATUS_OBJECT_NAME_COLLISION = 0xC0000035
STATUS_DIRECTORY_NOT_EMPTY = 0xC0000101
STATUS_NOT_LOCKED = 0xC000002A
STATUS_OBJECT_NAME_NOT_FOUND = 0xC0000034
STATUS_NOT_IMPLEMENTED = 0xC0000002
STATUS_OBJECT_PATH_NOT_FOUND = 0xC000003A
STATUS_BUFFER_OVERFLOW = 0x80000005
ERROR_ALREADY_EXISTS = 183
FILE_CASE_SENSITIVE_SEARCH = 0x00000001
FILE_CASE_PRESERVED_NAMES = 0x00000002
FILE_SUPPORTS_REMOTE_STORAGE = 0x00000100
FILE_UNICODE_ON_DISK = 0x00000004
FILE_PERSISTENT_ACLS = 0x00000008
# Some useful per-process global information
NATIVE_ENCODING = sys.getfilesystemencoding()
DATETIME_ZERO = datetime.datetime(1, 1, 1, 0, 0, 0)
DATETIME_STARTUP = datetime.datetime.utcnow()
FILETIME_UNIX_EPOCH = 116444736000000000
def handle_fs_errors(func):
"""Method decorator to report FS errors in the appropriate way.
This decorator catches all FS errors and translates them into an
equivalent OSError, then returns the negated error number. It also
makes the function return zero instead of None as an indication of
successful execution.
"""
func = convert_fs_errors(func)
@wraps(func)
def wrapper(*args, **kwds):
try:
res = func(*args, **kwds)
except OSError as e:
if e.errno:
res = _errno2syserrcode(e.errno)
else:
                res = STATUS_ACCESS_DENIED
except Exception as e:
raise
else:
if res is None:
res = 0
return res
return wrapper
# During long-running operations, Dokan requires that the DokanResetTimeout
# function be called periodically to indicate the progress is still being
# made. Unfortunately we don't have any facility for the underlying FS
# to make these calls for us, so we have to hack around it.
#
# The idea is to use a single background thread to monitor all active Dokan
# method calls, resetting the timeout until they have completed. Note that
# this completely undermines the point of DokanResetTimeout as it's now
# possible for a deadlock to hang the entire filesystem.
_TIMEOUT_PROTECT_THREAD = None
_TIMEOUT_PROTECT_LOCK = threading.Lock()
_TIMEOUT_PROTECT_COND = threading.Condition(_TIMEOUT_PROTECT_LOCK)
_TIMEOUT_PROTECT_QUEUE = deque()
_TIMEOUT_PROTECT_WAIT_TIME = 4 * 60
_TIMEOUT_PROTECT_RESET_TIME = 5 * 60 * 1000
def _start_timeout_protect_thread():
"""Start the background thread used to protect dokan from timeouts.
This function starts the background thread that monitors calls into the
dokan API and resets their timeouts. It's safe to call this more than
once, only a single thread will be started.
"""
global _TIMEOUT_PROTECT_THREAD
with _TIMEOUT_PROTECT_LOCK:
if _TIMEOUT_PROTECT_THREAD is None:
target = _run_timeout_protect_thread
_TIMEOUT_PROTECT_THREAD = threading.Thread(target=target)
_TIMEOUT_PROTECT_THREAD.daemon = True
_TIMEOUT_PROTECT_THREAD.start()
def _run_timeout_protect_thread():
while True:
with _TIMEOUT_PROTECT_COND:
try:
(when, info, finished) = _TIMEOUT_PROTECT_QUEUE.popleft()
except IndexError:
_TIMEOUT_PROTECT_COND.wait()
continue
if finished:
continue
now = time.time()
wait_time = max(0, _TIMEOUT_PROTECT_WAIT_TIME - now + when)
time.sleep(wait_time)
with _TIMEOUT_PROTECT_LOCK:
if finished:
continue
libdokan.DokanResetTimeout(_TIMEOUT_PROTECT_RESET_TIME, info)
_TIMEOUT_PROTECT_QUEUE.append((now + wait_time, info, finished))
def timeout_protect(func):
"""Method decorator to enable timeout protection during call.
This decorator adds an entry to the timeout protect queue before executing
the function, and marks it as finished when the function exits.
"""
@wraps(func)
def wrapper(self, *args):
if _TIMEOUT_PROTECT_THREAD is None:
_start_timeout_protect_thread()
info = args[-1]
finished = []
try:
with _TIMEOUT_PROTECT_COND:
_TIMEOUT_PROTECT_QUEUE.append((time.time(), info, finished))
_TIMEOUT_PROTECT_COND.notify()
return func(self, *args)
finally:
with _TIMEOUT_PROTECT_LOCK:
finished.append(True)
return wrapper
MIN_FH = 100
class FSOperations(object):
"""Object delegating all DOKAN_OPERATIONS pointers to an FS object."""
def __init__(self, fs, fsname="NTFS", volname="Dokan Volume", securityfolder=os.path.expanduser('~')):
if libdokan is None:
msg = 'dokan library (http://dokan-dev.github.io/) is not available'
raise OSError(msg)
self.fs = fs
self.fsname = fsname
self.volname = volname
self.securityfolder = securityfolder
self._files_by_handle = {}
self._files_lock = threading.Lock()
self._next_handle = MIN_FH
# Windows requires us to implement a kind of "lazy deletion", where
# a handle is marked for deletion but this is not actually done
# until the handle is closed. This set monitors pending deletes.
self._pending_delete = set()
# Since pyfilesystem has no locking API, we manage file locks
# in memory. This maps paths to a list of current locks.
self._active_locks = PathMap()
        # Dokan expects a successful write() to be reflected in the file's
# reported size, but the FS might buffer writes and prevent this.
# We explicitly keep track of the size Dokan expects a file to be.
# This dict is indexed by path, then file handle.
self._files_size_written = PathMap()
def get_ops_struct(self):
"""Get a DOKAN_OPERATIONS struct mapping to our methods."""
struct = libdokan.DOKAN_OPERATIONS()
for (nm, typ) in libdokan.DOKAN_OPERATIONS._fields_:
setattr(struct, nm, typ(getattr(self, nm)))
return struct
def _get_file(self, fh):
"""Get the information associated with the given file handle."""
try:
return self._files_by_handle[fh]
except KeyError:
raise FSError("invalid file handle")
def _reg_file(self, f, path):
"""Register a new file handle for the given file and path."""
self._files_lock.acquire()
try:
fh = self._next_handle
self._next_handle += 1
lock = threading.Lock()
self._files_by_handle[fh] = (f, path, lock)
if path not in self._files_size_written:
self._files_size_written[path] = {}
self._files_size_written[path][fh] = 0
return fh
finally:
self._files_lock.release()
def _rereg_file(self, fh, f):
"""Re-register the file handle for the given file.
This might be necessary if we are required to write to a file
after its handle was closed (e.g. to complete an async write).
"""
self._files_lock.acquire()
try:
(f2, path, lock) = self._files_by_handle[fh]
assert f2.closed
self._files_by_handle[fh] = (f, path, lock)
return fh
finally:
self._files_lock.release()
def _del_file(self, fh):
"""Unregister the given file handle."""
self._files_lock.acquire()
try:
(f, path, lock) = self._files_by_handle.pop(fh)
del self._files_size_written[path][fh]
if not self._files_size_written[path]:
del self._files_size_written[path]
finally:
self._files_lock.release()
def _is_pending_delete(self, path):
"""Check if the given path is pending deletion.
This is true if the path or any of its parents have been marked
as pending deletion, false otherwise.
"""
for ppath in recursepath(path):
if ppath in self._pending_delete:
return True
return False
def _check_lock(self, path, offset, length, info, locks=None):
"""Check whether the given file range is locked.
This method implements basic lock checking. It checks all the locks
held against the given file, and if any overlap the given byte range
then it returns STATUS_LOCK_NOT_GRANTED. If the range is not locked, it will
return zero.
"""
if locks is None:
with self._files_lock:
try:
locks = self._active_locks[path]
except KeyError:
return STATUS_SUCCESS
for (lh, lstart, lend) in locks:
if info is not None and info.contents.Context == lh:
continue
if lstart >= offset + length:
continue
if lend <= offset:
continue
return STATUS_LOCK_NOT_GRANTED
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def ZwCreateFile(self, path, securitycontext, access, attribute, sharing, disposition, options, info):
path = self._dokanpath2pyfs(path)
# Can't open files that are pending delete.
if self._is_pending_delete(path):
return STATUS_ACCESS_DENIED
retcode = STATUS_SUCCESS
if self.fs.isdir(path) or info.contents.IsDirectory:
info.contents.IsDirectory = True
exist = self.fs.exists(path)
if disposition == FILE_CREATE:
if self.fs.exists(path):
retcode = STATUS_OBJECT_NAME_COLLISION
self.fs.makedir(path)
elif disposition == FILE_OPEN_IF:
if not self.fs.exists(path):
retcode = STATUS_OBJECT_PATH_NOT_FOUND
else:
            # If no access rights are requested, only basic metadata is queried.
if not access:
if self.fs.isdir(path):
info.contents.IsDirectory = True
elif not self.fs.exists(path):
return STATUS_OBJECT_NAME_NOT_FOUND
return STATUS_SUCCESS
# This is where we'd convert the access mask into an appropriate
# mode string. Unfortunately, I can't seem to work out all the
# details. I swear MS Word is trying to write to files that it
# opens without asking for write permission.
# For now, just set the mode based on disposition flag.
if disposition == FILE_OVERWRITE_IF or disposition == FILE_SUPERSEDE:
if self.fs.exists(path):
retcode = STATUS_OBJECT_NAME_COLLISION
mode = "w+b"
elif disposition == FILE_OPEN_IF:
if not self.fs.exists(path):
mode = "w+b"
else:
mode = "r+b"
elif disposition == FILE_OPEN:
if not self.fs.exists(path):
return STATUS_OBJECT_NAME_NOT_FOUND
mode = "r+b"
elif disposition == FILE_OVERWRITE:
if not self.fs.exists(path):
return STATUS_OBJECT_NAME_NOT_FOUND
mode = "w+b"
elif disposition == FILE_CREATE:
if self.fs.exists(path):
return STATUS_OBJECT_NAME_COLLISION
mode = "w+b"
else:
mode = "r+b"
# Try to open the requested file. It may actually be a directory.
info.contents.Context = 1
try:
f = self.fs.open(path, mode)
# print(path, mode, repr(f))
except ResourceInvalidError:
info.contents.IsDirectory = True
except FSError as e:
# Sadly, win32 OSFS will raise all kinds of strange errors
# if you try to open() a directory. Need to check by hand.
if self.fs.isdir(path):
info.contents.IsDirectory = True
else:
# print(e)
raise
else:
info.contents.Context = self._reg_file(f, path)
if retcode == STATUS_SUCCESS and (options & FILE_DELETE_ON_CLOSE):
self._pending_delete.add(path)
return retcode
@timeout_protect
@handle_fs_errors
def Cleanup(self, path, info):
path = self._dokanpath2pyfs(path)
if info.contents.IsDirectory:
if info.contents.DeleteOnClose:
self.fs.removedir(path)
self._pending_delete.remove(path)
else:
(file, _, lock) = self._get_file(info.contents.Context)
lock.acquire()
try:
file.close()
if info.contents.DeleteOnClose:
self.fs.remove(path)
self._pending_delete.remove(path)
self._del_file(info.contents.Context)
info.contents.Context = 0
finally:
lock.release()
@timeout_protect
@handle_fs_errors
def CloseFile(self, path, info):
if info.contents.Context >= MIN_FH:
(file, _, lock) = self._get_file(info.contents.Context)
lock.acquire()
try:
file.close()
self._del_file(info.contents.Context)
finally:
lock.release()
info.contents.Context = 0
@timeout_protect
@handle_fs_errors
def ReadFile(self, path, buffer, nBytesToRead, nBytesRead, offset, info):
path = self._dokanpath2pyfs(path)
(file, _, lock) = self._get_file(info.contents.Context)
lock.acquire()
try:
status = self._check_lock(path, offset, nBytesToRead, info)
if status:
return status
# This may be called after Cleanup, meaning we
# need to re-open the file.
if file.closed:
file = self.fs.open(path, file.mode)
self._rereg_file(info.contents.Context, file)
file.seek(offset)
data = file.read(nBytesToRead)
ctypes.memmove(buffer, ctypes.create_string_buffer(data), len(data))
nBytesRead[0] = len(data)
finally:
lock.release()
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def WriteFile(self, path, buffer, nBytesToWrite, nBytesWritten, offset, info):
path = self._dokanpath2pyfs(path)
fh = info.contents.Context
(file, _, lock) = self._get_file(fh)
lock.acquire()
try:
status = self._check_lock(path, offset, nBytesToWrite, info)
if status:
return status
# This may be called after Cleanup, meaning we
# need to re-open the file.
if file.closed:
file = self.fs.open(path, file.mode)
self._rereg_file(info.contents.Context, file)
if info.contents.WriteToEndOfFile:
file.seek(0, os.SEEK_END)
else:
file.seek(offset)
data = ctypes.create_string_buffer(nBytesToWrite)
ctypes.memmove(data, buffer, nBytesToWrite)
file.write(data.raw)
nBytesWritten[0] = len(data.raw)
try:
size_written = self._files_size_written[path][fh]
except KeyError:
pass
else:
if offset + nBytesWritten[0] > size_written:
new_size_written = offset + nBytesWritten[0]
self._files_size_written[path][fh] = new_size_written
finally:
lock.release()
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def FlushFileBuffers(self, path, info):
path = self._dokanpath2pyfs(path)
(file, _, lock) = self._get_file(info.contents.Context)
lock.acquire()
try:
file.flush()
finally:
lock.release()
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def GetFileInformation(self, path, buffer, info):
path = self._dokanpath2pyfs(path)
finfo = self.fs.getinfo(path)
data = buffer.contents
self._info2finddataw(path, finfo, data, info)
try:
written_size = max(self._files_size_written[path].values())
except KeyError:
pass
else:
reported_size = (data.nFileSizeHigh << 32) + data.nFileSizeLow
if written_size > reported_size:
data.nFileSizeHigh = written_size >> 32
data.nFileSizeLow = written_size & 0xffffffff
data.nNumberOfLinks = 1
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def FindFiles(self, path, fillFindData, info):
path = self._dokanpath2pyfs(path)
for (nm, finfo) in self.fs.listdirinfo(path):
fpath = pathjoin(path, nm)
if self._is_pending_delete(fpath):
continue
data = self._info2finddataw(fpath, finfo)
fillFindData(ctypes.byref(data), info)
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def FindFilesWithPattern(self, path, pattern, fillFindData, info):
path = self._dokanpath2pyfs(path)
for (nm, finfo) in self.fs.listdirinfo(path):
fpath = pathjoin(path, nm)
if self._is_pending_delete(fpath):
continue
if not libdokan.DokanIsNameInExpression(pattern, nm, True):
continue
data = self._info2finddataw(fpath, finfo, None)
fillFindData(ctypes.byref(data), info)
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def SetFileAttributes(self, path, attrs, info):
path = self._dokanpath2pyfs(path)
# TODO: decode various file attributes
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def SetFileTime(self, path, ctime, atime, mtime, info):
path = self._dokanpath2pyfs(path)
# setting ctime is not supported
if atime is not None:
try:
atime = _filetime2datetime(atime.contents)
except ValueError:
atime = None
if mtime is not None:
try:
mtime = _filetime2datetime(mtime.contents)
except ValueError:
mtime = None
# some programs demand this succeed; fake it
try:
self.fs.settimes(path, atime, mtime)
except UnsupportedError:
pass
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def DeleteFile(self, path, info):
path = self._dokanpath2pyfs(path)
if not self.fs.isfile(path):
if not self.fs.exists(path):
return STATUS_ACCESS_DENIED
else:
return STATUS_OBJECT_NAME_NOT_FOUND
self._pending_delete.add(path)
# the actual delete takes place in self.CloseFile()
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def DeleteDirectory(self, path, info):
path = self._dokanpath2pyfs(path)
for nm in self.fs.listdir(path):
if not self._is_pending_delete(pathjoin(path, nm)):
return STATUS_DIRECTORY_NOT_EMPTY
self._pending_delete.add(path)
# the actual delete takes place in self.CloseFile()
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def MoveFile(self, src, dst, overwrite, info):
# Close the file if we have an open handle to it.
if info.contents.Context >= MIN_FH:
(file, _, lock) = self._get_file(info.contents.Context)
lock.acquire()
try:
file.close()
self._del_file(info.contents.Context)
finally:
lock.release()
src = self._dokanpath2pyfs(src)
dst = self._dokanpath2pyfs(dst)
if info.contents.IsDirectory:
self.fs.movedir(src, dst, overwrite=overwrite)
else:
self.fs.move(src, dst, overwrite=overwrite)
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def SetEndOfFile(self, path, length, info):
self._dokanpath2pyfs(path)
(file, _, lock) = self._get_file(info.contents.Context)
lock.acquire()
try:
pos = file.tell()
if length != pos:
file.seek(length)
file.truncate()
if pos < length:
file.seek(min(pos, length))
finally:
lock.release()
return STATUS_SUCCESS
@handle_fs_errors
def GetDiskFreeSpace(self, nBytesAvail, nBytesTotal, nBytesFree, info):
        # This returns a stupidly large number if no info is available.
# It's better to pretend an operation is possible and have it fail
# than to pretend an operation will fail when it's actually possible.
large_amount = 100 * 1024 * 1024 * 1024
nBytesFree[0] = self.fs.getmeta("free_space", large_amount)
nBytesTotal[0] = self.fs.getmeta("total_space", 2 * large_amount)
nBytesAvail[0] = nBytesFree[0]
return STATUS_SUCCESS
@handle_fs_errors
def GetVolumeInformation(self, vnmBuf, vnmSz, sNum, maxLen, flags, fnmBuf, fnmSz, info):
nm = ctypes.create_unicode_buffer(self.volname[:vnmSz - 1])
sz = (len(nm.value) + 1) * ctypes.sizeof(ctypes.c_wchar)
ctypes.memmove(vnmBuf, nm, sz)
if sNum:
sNum[0] = 0
if maxLen:
maxLen[0] = 255
if flags:
            flags[0] = FILE_CASE_SENSITIVE_SEARCH | FILE_CASE_PRESERVED_NAMES | FILE_SUPPORTS_REMOTE_STORAGE | FILE_UNICODE_ON_DISK | FILE_PERSISTENT_ACLS
nm = ctypes.create_unicode_buffer(self.fsname[:fnmSz - 1])
sz = (len(nm.value) + 1) * ctypes.sizeof(ctypes.c_wchar)
ctypes.memmove(fnmBuf, nm, sz)
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def SetAllocationSize(self, path, length, info):
# I think this is supposed to reserve space for the file
# but *not* actually move the end-of-file marker.
# No way to do that in pyfs.
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def LockFile(self, path, offset, length, info):
end = offset + length
with self._files_lock:
try:
locks = self._active_locks[path]
except KeyError:
locks = self._active_locks[path] = []
else:
status = self._check_lock(path, offset, length, None, locks)
if status:
return status
locks.append((info.contents.Context, offset, end))
return STATUS_SUCCESS
@timeout_protect
@handle_fs_errors
def UnlockFile(self, path, offset, length, info):
with self._files_lock:
try:
locks = self._active_locks[path]
except KeyError:
return STATUS_NOT_LOCKED
todel = []
for i, (lh, lstart, lend) in enumerate(locks):
if info.contents.Context == lh:
if lstart == offset:
if lend == offset + length:
todel.append(i)
if not todel:
return STATUS_NOT_LOCKED
for i in reversed(todel):
del locks[i]
return STATUS_SUCCESS
@handle_fs_errors
def GetFileSecurity(self, path, securityinformation, securitydescriptor, securitydescriptorlength, neededlength, info):
securitydescriptor = ctypes.cast(securitydescriptor, libdokan.PSECURITY_DESCRIPTOR)
path = self._dokanpath2pyfs(path)
if self.fs.isdir(path):
res = libdokan.GetFileSecurity(
self.securityfolder,
ctypes.cast(securityinformation, libdokan.PSECURITY_INFORMATION)[0],
securitydescriptor,
securitydescriptorlength,
neededlength,
)
return STATUS_SUCCESS if res else STATUS_BUFFER_OVERFLOW
return STATUS_NOT_IMPLEMENTED
@handle_fs_errors
def SetFileSecurity(self, path, securityinformation, securitydescriptor, securitydescriptorlength, info):
return STATUS_NOT_IMPLEMENTED
@handle_fs_errors
def Mounted(self, info):
return STATUS_SUCCESS
@handle_fs_errors
def Unmounted(self, info):
return STATUS_SUCCESS
@handle_fs_errors
def FindStreams(self, path, callback, info):
return STATUS_NOT_IMPLEMENTED
def _dokanpath2pyfs(self, path):
path = path.replace('\\', '/')
return normpath(path)
def _info2attrmask(self, path, info, hinfo=None):
"""Convert a file/directory info dict to a win32 file attribute mask."""
attrs = 0
st_mode = info.get("st_mode", None)
if st_mode:
if statinfo.S_ISDIR(st_mode):
attrs |= FILE_ATTRIBUTE_DIRECTORY
elif statinfo.S_ISREG(st_mode):
attrs |= FILE_ATTRIBUTE_NORMAL
if not attrs and hinfo:
if hinfo.contents.IsDirectory:
attrs |= FILE_ATTRIBUTE_DIRECTORY
else:
attrs |= FILE_ATTRIBUTE_NORMAL
if not attrs:
if self.fs.isdir(path):
attrs |= FILE_ATTRIBUTE_DIRECTORY
else:
attrs |= FILE_ATTRIBUTE_NORMAL
return attrs
def _info2finddataw(self, path, info, data=None, hinfo=None):
"""Convert a file/directory info dict into a WIN32_FIND_DATAW struct."""
if data is None:
data = libdokan.WIN32_FIND_DATAW()
data.dwFileAttributes = self._info2attrmask(path, info, hinfo)
data.ftCreationTime = _datetime2filetime(info.get("created_time", None))
data.ftLastAccessTime = _datetime2filetime(info.get("accessed_time", None))
data.ftLastWriteTime = _datetime2filetime(info.get("modified_time", None))
data.nFileSizeHigh = info.get("size", 0) >> 32
data.nFileSizeLow = info.get("size", 0) & 0xffffffff
data.cFileName = basename(path)
data.cAlternateFileName = ""
return data
def _datetime2timestamp(dtime):
"""Convert a datetime object to a unix timestamp."""
t = time.mktime(dtime.timetuple())
t += dtime.microsecond / 1000000.0
return t
def _timestamp2datetime(tstamp):
"""Convert a unix timestamp to a datetime object."""
return datetime.datetime.fromtimestamp(tstamp)
def _timestamp2filetime(tstamp):
f = FILETIME_UNIX_EPOCH + int(tstamp * 10000000)
return libdokan.FILETIME(f & 0xffffffff, f >> 32)
def _filetime2timestamp(ftime):
f = ftime.dwLowDateTime | (ftime.dwHighDateTime << 32)
return (f - FILETIME_UNIX_EPOCH) / 10000000.0
def _filetime2datetime(ftime):
    """Convert a FILETIME struct into a datetime.datetime object."""
if ftime is None:
return DATETIME_ZERO
if ftime.dwLowDateTime == 0 and ftime.dwHighDateTime == 0:
return DATETIME_ZERO
return _timestamp2datetime(_filetime2timestamp(ftime))
def _datetime2filetime(dtime):
    """Convert a datetime.datetime object into a FILETIME struct."""
if dtime is None:
return libdokan.FILETIME(0, 0)
if dtime == DATETIME_ZERO:
return libdokan.FILETIME(0, 0)
return _timestamp2filetime(_datetime2timestamp(dtime))
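# Hedged round-trip sketch (added for illustration only; it assumes libdokan was
# imported successfully above, since the FILETIME struct comes from it). The
# conversion should round-trip with at most sub-microsecond error.
def _filetime_roundtrip_demo(dtime=DATETIME_STARTUP):
    ftime = _datetime2filetime(dtime)
    return _filetime2datetime(ftime)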
def _errno2syserrcode(eno):
"""Convert an errno into a win32 system error code."""
if eno == errno.EEXIST:
return STATUS_OBJECT_NAME_COLLISION
if eno == errno.ENOTEMPTY:
return STATUS_DIRECTORY_NOT_EMPTY
if eno == errno.ENOSYS:
return STATUS_NOT_SUPPORTED
if eno == errno.EACCES:
return STATUS_ACCESS_DENIED
return eno
def _check_path_string(path): # TODO Probably os.path has a better check for this...
"""Check path string."""
if not path or not path[0].isalpha() or not path[1:3] == ':\\':
raise ValueError("invalid path: %r" % (path,))
def mount(fs, path, foreground=False, ready_callback=None, unmount_callback=None, **kwds):
"""Mount the given FS at the given path, using Dokan.
By default, this function spawns a new background process to manage the
Dokan event loop. The return value in this case is an instance of the
'MountProcess' class, a subprocess.Popen subclass.
If the keyword argument 'foreground' is given, we instead run the Dokan
main loop in the current process. In this case the function will block
until the filesystem is unmounted, then return None.
If the keyword argument 'ready_callback' is provided, it will be called
when the filesystem has been mounted and is ready for use. Any additional
keyword arguments control the behavior of the final dokan mount point.
Some interesting options include:
* numthreads: number of threads to use for handling Dokan requests
* fsname: name to display in explorer etc
* flags: DOKAN_OPTIONS bitmask
* securityfolder: folder path used to duplicate security rights on all folders
* FSOperationsClass: custom FSOperations subclass to use
"""
if libdokan is None:
raise OSError("the dokan library is not available")
_check_path_string(path)
# This function captures the logic of checking whether the Dokan mount
# is up and running. Unfortunately I can't find a way to get this
# via a callback in the Dokan API. Instead we just check for the path
# in a loop, polling the mount proc to make sure it hasn't died.
def check_alive(mp):
if mp and mp.poll() is not None:
raise OSError("dokan mount process exited prematurely")
def check_ready(mp=None):
if ready_callback is not False:
check_alive(mp)
for _ in range(100):
try:
os.stat(path)
except EnvironmentError:
check_alive(mp)
time.sleep(0.05)
else:
check_alive(mp)
if ready_callback:
return ready_callback()
else:
return None
else:
check_alive(mp)
raise OSError("dokan mount process seems to be hung")
    # Running in the foreground is the final endpoint for the mount
# operation, it's where we call DokanMain().
if foreground:
numthreads = kwds.pop("numthreads", 0)
flags = kwds.pop("flags", 0)
FSOperationsClass = kwds.pop("FSOperationsClass", FSOperations)
opts = libdokan.DOKAN_OPTIONS(libdokan.DOKAN_MINIMUM_COMPATIBLE_VERSION, numthreads, flags, 0, path, "", 2000, 512, 512)
ops = FSOperationsClass(fs, **kwds)
if ready_callback:
check_thread = threading.Thread(target=check_ready)
check_thread.daemon = True
check_thread.start()
opstruct = ops.get_ops_struct()
res = libdokan.DokanMain(ctypes.byref(opts), ctypes.byref(opstruct))
if res != DOKAN_SUCCESS:
raise OSError("Dokan failed with error: %d" % (res,))
if unmount_callback:
unmount_callback()
    # Running in the background, spawn a subprocess and wait for it
# to be ready before returning.
else:
mp = MountProcess(fs, path, kwds)
check_ready(mp)
if unmount_callback:
orig_unmount = mp.unmount
def new_unmount():
orig_unmount()
unmount_callback()
mp.unmount = new_unmount
return mp
def unmount(path):
"""Unmount the given path.
This function unmounts the dokan path mounted at the given path.
    It works but may leave dangling processes; it's better to use the "unmount"
method on the MountProcess class if you have one.
"""
_check_path_string(path)
if not libdokan.DokanRemoveMountPoint(path):
raise OSError("filesystem could not be unmounted: %s" % (path,))
class MountProcess(subprocess.Popen):
"""subprocess.Popen subclass managing a Dokan mount.
This is a subclass of subprocess.Popen, designed for easy management of
a Dokan mount in a background process. Rather than specifying the command
to execute, pass in the FS object to be mounted, the target path
and a dictionary of options for the Dokan process.
In order to be passed successfully to the new process, the FS object
    must be pickleable. Since win32 has no fork(), this restriction is not
    likely to be lifted (see also the "multiprocessing" module).
This class has an extra attribute 'path' giving the path of the mounted
filesystem, and an extra method 'unmount' that will cleanly unmount it
and terminate the process.
"""
# This works by spawning a new python interpreter and passing it the
# pickled (fs,path,opts) tuple on the command-line. Something like this:
#
# python -c "import MountProcess; MountProcess._do_mount('..data..')
#
unmount_timeout = 5
def __init__(self, fs, path, dokan_opts={}, nowait=False, **kwds):
if libdokan is None:
raise OSError("the dokan library is not available")
_check_path_string(path)
self.path = path
cmd = "try: import cPickle as pickle;\n"
cmd = cmd + "except ImportError: import pickle;\n"
cmd = cmd + "data = pickle.loads(%s); "
cmd = cmd + "from fs.expose.dokan import MountProcess; "
cmd = cmd + "MountProcess._do_mount(data)"
cmd = cmd % (repr(pickle.dumps((fs, path, dokan_opts, nowait), -1)),)
cmd = [sys.executable, "-c", cmd]
super(MountProcess, self).__init__(cmd, **kwds)
def unmount(self):
"""Cleanly unmount the Dokan filesystem, terminating this subprocess."""
if not libdokan.DokanRemoveMountPoint(self.path):
            raise OSError("the filesystem could not be unmounted: %s" % (self.path,))
self.terminate()
if not hasattr(subprocess.Popen, "terminate"):
def terminate(self):
"""Gracefully terminate the subprocess."""
kernel32.TerminateProcess(int(self._handle), -1)
if not hasattr(subprocess.Popen, "kill"):
def kill(self):
"""Forcibly terminate the subprocess."""
kernel32.TerminateProcess(int(self._handle), -1)
@staticmethod
def _do_mount(data):
"""Perform the specified mount."""
(fs, path, opts, nowait) = data
opts["foreground"] = True
def unmount_callback():
fs.close()
opts["unmount_callback"] = unmount_callback
if nowait:
opts["ready_callback"] = False
mount(fs, path, **opts)
class Win32SafetyFS(WrapFS):
"""FS wrapper for extra safety when mounting on win32.
This wrapper class provides some safety features when mounting untrusted
filesystems on win32. Specifically:
* hiding autorun files
* removing colons from paths
"""
def __init__(self, wrapped_fs, allow_autorun=False):
self.allow_autorun = allow_autorun
super(Win32SafetyFS, self).__init__(wrapped_fs)
def _encode(self, path):
path = relpath(normpath(path))
path = path.replace(":", "__colon__")
if not self.allow_autorun:
if path.lower().startswith("_autorun."):
path = path[1:]
return path
def _decode(self, path):
path = relpath(normpath(path))
path = path.replace("__colon__", ":")
if not self.allow_autorun:
if path.lower().startswith("autorun."):
path = "_" + path
return path
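# Hedged usage sketch (added for illustration only; it pokes at the private
# _encode/_decode helpers purely to show the path rewriting, and MemoryFS is just
# a convenient stand-in for an untrusted filesystem).
def _win32_safety_demo():
    from fs.memoryfs import MemoryFS
    safe_fs = Win32SafetyFS(MemoryFS())
    # A wrapped "autorun.inf" is exposed as "_autorun.inf"; colons become inert.
    return safe_fs._decode("autorun.inf"), safe_fs._encode("file:stream.txt")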
if __name__ == "__main__":
import os.path
import tempfile
from fs.osfs import OSFS
from fs.memoryfs import MemoryFS
from shutil import rmtree
from six import b
path = tempfile.mkdtemp()
try:
fs = OSFS(path)
#fs = MemoryFS()
fs.setcontents("test1.txt", b("test one"))
flags = DOKAN_OPTION_DEBUG | DOKAN_OPTION_STDERR | DOKAN_OPTION_REMOVABLE
mount(fs, "Q:\\", foreground=True, numthreads=1, flags=flags)
fs.close()
finally:
rmtree(path)
|
the-stack_0_13206 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Locale dependent formatting and parsing of numeric data.
The default locale for the functions in this module is determined by the
following environment variables, in that order:
* ``LC_NUMERIC``,
* ``LC_ALL``, and
* ``LANG``
"""
# TODO:
# Padding and rounding increments in pattern:
# - http://www.unicode.org/reports/tr35/ (Appendix G.6)
import math
import re
try:
from decimal import Decimal
have_decimal = True
except ImportError:
have_decimal = False
from babel.core import default_locale, Locale
__all__ = ['format_number', 'format_decimal', 'format_currency',
'format_percent', 'format_scientific', 'parse_number',
'parse_decimal', 'NumberFormatError']
__docformat__ = 'restructuredtext en'
LC_NUMERIC = default_locale('LC_NUMERIC')
def get_currency_name(currency, locale=LC_NUMERIC):
"""Return the name used by the locale for the specified currency.
>>> get_currency_name('USD', 'en_US')
u'US Dollar'
:param currency: the currency code
:param locale: the `Locale` object or locale identifier
:return: the currency symbol
:rtype: `unicode`
:since: version 0.9.4
"""
return Locale.parse(locale).currencies.get(currency, currency)
def get_currency_symbol(currency, locale=LC_NUMERIC):
"""Return the symbol used by the locale for the specified currency.
>>> get_currency_symbol('USD', 'en_US')
u'$'
:param currency: the currency code
:param locale: the `Locale` object or locale identifier
:return: the currency symbol
:rtype: `unicode`
"""
return Locale.parse(locale).currency_symbols.get(currency, currency)
def get_decimal_symbol(locale=LC_NUMERIC):
"""Return the symbol used by the locale to separate decimal fractions.
>>> get_decimal_symbol('en_US')
u'.'
:param locale: the `Locale` object or locale identifier
:return: the decimal symbol
:rtype: `unicode`
"""
return Locale.parse(locale).number_symbols.get('decimal', u'.')
def get_plus_sign_symbol(locale=LC_NUMERIC):
"""Return the plus sign symbol used by the current locale.
>>> get_plus_sign_symbol('en_US')
u'+'
:param locale: the `Locale` object or locale identifier
:return: the plus sign symbol
:rtype: `unicode`
"""
return Locale.parse(locale).number_symbols.get('plusSign', u'+')
def get_minus_sign_symbol(locale=LC_NUMERIC):
"""Return the plus sign symbol used by the current locale.
>>> get_minus_sign_symbol('en_US')
u'-'
:param locale: the `Locale` object or locale identifier
    :return: the minus sign symbol
:rtype: `unicode`
"""
return Locale.parse(locale).number_symbols.get('minusSign', u'-')
def get_exponential_symbol(locale=LC_NUMERIC):
"""Return the symbol used by the locale to separate mantissa and exponent.
>>> get_exponential_symbol('en_US')
u'E'
:param locale: the `Locale` object or locale identifier
:return: the exponential symbol
:rtype: `unicode`
"""
return Locale.parse(locale).number_symbols.get('exponential', u'E')
def get_group_symbol(locale=LC_NUMERIC):
"""Return the symbol used by the locale to separate groups of thousands.
>>> get_group_symbol('en_US')
u','
:param locale: the `Locale` object or locale identifier
:return: the group symbol
:rtype: `unicode`
"""
return Locale.parse(locale).number_symbols.get('group', u',')
def format_number(number, locale=LC_NUMERIC):
"""Return the given number formatted for a specific locale.
>>> format_number(1099, locale='en_US')
u'1,099'
:param number: the number to format
:param locale: the `Locale` object or locale identifier
:return: the formatted number
:rtype: `unicode`
"""
# Do we really need this one?
return format_decimal(number, locale=locale)
def format_decimal(number, format=None, locale=LC_NUMERIC):
"""Return the given decimal number formatted for a specific locale.
>>> format_decimal(1.2345, locale='en_US')
u'1.234'
>>> format_decimal(1.2346, locale='en_US')
u'1.235'
>>> format_decimal(-1.2346, locale='en_US')
u'-1.235'
>>> format_decimal(1.2345, locale='sv_SE')
u'1,234'
>>> format_decimal(12345, locale='de')
u'12.345'
The appropriate thousands grouping and the decimal separator are used for
each locale:
>>> format_decimal(12345.5, locale='en_US')
u'12,345.5'
:param number: the number to format
:param format:
:param locale: the `Locale` object or locale identifier
:return: the formatted decimal number
:rtype: `unicode`
"""
locale = Locale.parse(locale)
if not format:
format = locale.decimal_formats.get(format)
pattern = parse_pattern(format)
return pattern.apply(number, locale)
def format_currency(number, currency, format=None, locale=LC_NUMERIC):
u"""Return formatted currency value.
>>> format_currency(1099.98, 'USD', locale='en_US')
u'$1,099.98'
>>> format_currency(1099.98, 'USD', locale='es_CO')
u'US$\\xa01.099,98'
>>> format_currency(1099.98, 'EUR', locale='de_DE')
u'1.099,98\\xa0\\u20ac'
The pattern can also be specified explicitly:
>>> format_currency(1099.98, 'EUR', u'\xa4\xa4 #,##0.00', locale='en_US')
u'EUR 1,099.98'
:param number: the number to format
:param currency: the currency code
:param locale: the `Locale` object or locale identifier
:return: the formatted currency value
:rtype: `unicode`
"""
locale = Locale.parse(locale)
if not format:
format = locale.currency_formats.get(format)
pattern = parse_pattern(format)
return pattern.apply(number, locale, currency=currency)
def format_percent(number, format=None, locale=LC_NUMERIC):
"""Return formatted percent value for a specific locale.
>>> format_percent(0.34, locale='en_US')
u'34%'
>>> format_percent(25.1234, locale='en_US')
u'2,512%'
>>> format_percent(25.1234, locale='sv_SE')
u'2\\xa0512\\xa0%'
The format pattern can also be specified explicitly:
>>> format_percent(25.1234, u'#,##0\u2030', locale='en_US')
u'25,123\u2030'
:param number: the percent number to format
:param format:
:param locale: the `Locale` object or locale identifier
:return: the formatted percent number
:rtype: `unicode`
"""
locale = Locale.parse(locale)
if not format:
format = locale.percent_formats.get(format)
pattern = parse_pattern(format)
return pattern.apply(number, locale)
def format_scientific(number, format=None, locale=LC_NUMERIC):
"""Return value formatted in scientific notation for a specific locale.
>>> format_scientific(10000, locale='en_US')
u'1E4'
The format pattern can also be specified explicitly:
>>> format_scientific(1234567, u'##0E00', locale='en_US')
u'1.23E06'
:param number: the number to format
:param format:
:param locale: the `Locale` object or locale identifier
:return: value formatted in scientific notation.
:rtype: `unicode`
"""
locale = Locale.parse(locale)
if not format:
format = locale.scientific_formats.get(format)
pattern = parse_pattern(format)
return pattern.apply(number, locale)
class NumberFormatError(ValueError):
"""Exception raised when a string cannot be parsed into a number."""
def parse_number(string, locale=LC_NUMERIC):
"""Parse localized number string into a long integer.
>>> parse_number('1,099', locale='en_US')
1099L
>>> parse_number('1.099', locale='de_DE')
1099L
When the given string cannot be parsed, an exception is raised:
>>> parse_number('1.099,98', locale='de')
Traceback (most recent call last):
...
NumberFormatError: '1.099,98' is not a valid number
:param string: the string to parse
:param locale: the `Locale` object or locale identifier
:return: the parsed number
:rtype: `long`
:raise `NumberFormatError`: if the string can not be converted to a number
"""
try:
return long(string.replace(get_group_symbol(locale), ''))
except ValueError:
raise NumberFormatError('%r is not a valid number' % string)
def parse_decimal(string, locale=LC_NUMERIC):
"""Parse localized decimal string into a float.
>>> parse_decimal('1,099.98', locale='en_US')
1099.98
>>> parse_decimal('1.099,98', locale='de')
1099.98
When the given string cannot be parsed, an exception is raised:
>>> parse_decimal('2,109,998', locale='de')
Traceback (most recent call last):
...
NumberFormatError: '2,109,998' is not a valid decimal number
:param string: the string to parse
:param locale: the `Locale` object or locale identifier
:return: the parsed decimal number
:rtype: `float`
:raise `NumberFormatError`: if the string can not be converted to a
decimal number
"""
locale = Locale.parse(locale)
try:
return float(string.replace(get_group_symbol(locale), '')
.replace(get_decimal_symbol(locale), '.'))
except ValueError:
raise NumberFormatError('%r is not a valid decimal number' % string)
PREFIX_END = r'[^0-9@#.,]'
NUMBER_TOKEN = r'[0-9@#.\-,E+]'
PREFIX_PATTERN = r"(?P<prefix>(?:'[^']*'|%s)*)" % PREFIX_END
NUMBER_PATTERN = r"(?P<number>%s+)" % NUMBER_TOKEN
SUFFIX_PATTERN = r"(?P<suffix>.*)"
number_re = re.compile(r"%s%s%s" % (PREFIX_PATTERN, NUMBER_PATTERN,
SUFFIX_PATTERN))
def split_number(value):
"""Convert a number into a (intasstring, fractionasstring) tuple"""
if have_decimal and isinstance(value, Decimal):
text = str(value)
else:
text = ('%.9f' % value).rstrip('0')
if '.' in text:
a, b = text.split('.', 1)
if b == '0':
b = ''
else:
a, b = text, ''
return a, b
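# Illustrative examples (not in the original source), following the formatting
# logic above:
#   split_number(1234.5)  -> ('1234', '5')
#   split_number(10)      -> ('10', '')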
def bankersround(value, ndigits=0):
"""Round a number to a given precision.
Works like round() except that the round-half-even (banker's rounding)
algorithm is used instead of round-half-up.
>>> bankersround(5.5, 0)
6.0
>>> bankersround(6.5, 0)
6.0
>>> bankersround(-6.5, 0)
-6.0
>>> bankersround(1234.0, -2)
1200.0
"""
sign = int(value < 0) and -1 or 1
value = abs(value)
a, b = split_number(value)
digits = a + b
add = 0
i = len(a) + ndigits
if i < 0 or i >= len(digits):
pass
elif digits[i] > '5':
add = 1
elif digits[i] == '5' and digits[i-1] in '13579':
add = 1
scale = 10**ndigits
if have_decimal and isinstance(value, Decimal):
return Decimal(int(value * scale + add)) / scale * sign
else:
return float(int(value * scale + add)) / scale * sign
def parse_pattern(pattern):
"""Parse number format patterns"""
if isinstance(pattern, NumberPattern):
return pattern
# Do we have a negative subpattern?
if ';' in pattern:
pattern, neg_pattern = pattern.split(';', 1)
pos_prefix, number, pos_suffix = number_re.search(pattern).groups()
neg_prefix, _, neg_suffix = number_re.search(neg_pattern).groups()
else:
pos_prefix, number, pos_suffix = number_re.search(pattern).groups()
neg_prefix = '-' + pos_prefix
neg_suffix = pos_suffix
if 'E' in number:
number, exp = number.split('E', 1)
else:
exp = None
if '@' in number:
if '.' in number and '0' in number:
raise ValueError('Significant digit patterns can not contain '
'"@" or "0"')
if '.' in number:
integer, fraction = number.rsplit('.', 1)
else:
integer = number
fraction = ''
min_frac = max_frac = 0
def parse_precision(p):
"""Calculate the min and max allowed digits"""
min = max = 0
for c in p:
if c in '@0':
min += 1
max += 1
elif c == '#':
max += 1
elif c == ',':
continue
else:
break
return min, max
def parse_grouping(p):
"""Parse primary and secondary digit grouping
        >>> parse_grouping('##')
        (1000, 1000)
        >>> parse_grouping('#,###')
        (3, 3)
        >>> parse_grouping('#,####,###')
        (3, 4)
"""
width = len(p)
g1 = p.rfind(',')
if g1 == -1:
return 1000, 1000
g1 = width - g1 - 1
g2 = p[:-g1 - 1].rfind(',')
if g2 == -1:
return g1, g1
g2 = width - g1 - g2 - 2
return g1, g2
int_prec = parse_precision(integer)
frac_prec = parse_precision(fraction)
if exp:
frac_prec = parse_precision(integer+fraction)
exp_plus = exp.startswith('+')
exp = exp.lstrip('+')
exp_prec = parse_precision(exp)
else:
exp_plus = None
exp_prec = None
grouping = parse_grouping(integer)
return NumberPattern(pattern, (pos_prefix, neg_prefix),
(pos_suffix, neg_suffix), grouping,
int_prec, frac_prec,
exp_prec, exp_plus)
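# Illustrative sketch (not part of the original module): what parse_pattern
# extracts from a common pattern string; the expected values follow from the
# helper functions above and the 'en_US' symbols used in the doctests.
def _example_parse_pattern():
    pat = parse_pattern(u'#,##0.00')
    # pat.grouping == (3, 3), pat.int_prec == (1, 4), pat.frac_prec == (2, 2)
    return pat.apply(1234.5, 'en_US')   # -> u'1,234.50'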
class NumberPattern(object):
def __init__(self, pattern, prefix, suffix, grouping,
int_prec, frac_prec, exp_prec, exp_plus):
self.pattern = pattern
self.prefix = prefix
self.suffix = suffix
self.grouping = grouping
self.int_prec = int_prec
self.frac_prec = frac_prec
self.exp_prec = exp_prec
self.exp_plus = exp_plus
if '%' in ''.join(self.prefix + self.suffix):
self.scale = 100
elif u'‰' in ''.join(self.prefix + self.suffix):
self.scale = 1000
else:
self.scale = 1
def __repr__(self):
return '<%s %r>' % (type(self).__name__, self.pattern)
def apply(self, value, locale, currency=None):
value *= self.scale
is_negative = int(value < 0)
if self.exp_prec: # Scientific notation
value = abs(value)
if value:
exp = int(math.floor(math.log(value, 10)))
else:
exp = 0
# Minimum number of integer digits
if self.int_prec[0] == self.int_prec[1]:
exp -= self.int_prec[0] - 1
# Exponent grouping
elif self.int_prec[1]:
exp = int(exp) / self.int_prec[1] * self.int_prec[1]
if not have_decimal or not isinstance(value, Decimal):
value = float(value)
if exp < 0:
value = value * 10**(-exp)
else:
value = value / 10**exp
exp_sign = ''
if exp < 0:
exp_sign = get_minus_sign_symbol(locale)
elif self.exp_plus:
exp_sign = get_plus_sign_symbol(locale)
exp = abs(exp)
number = u'%s%s%s%s' % \
(self._format_sigdig(value, self.frac_prec[0],
self.frac_prec[1]),
get_exponential_symbol(locale), exp_sign,
self._format_int(str(exp), self.exp_prec[0],
self.exp_prec[1], locale))
        elif '@' in self.pattern: # Is it a significant digits pattern?
text = self._format_sigdig(abs(value),
self.int_prec[0],
self.int_prec[1])
if '.' in text:
a, b = text.split('.')
a = self._format_int(a, 0, 1000, locale)
if b:
b = get_decimal_symbol(locale) + b
number = a + b
else:
number = self._format_int(text, 0, 1000, locale)
else: # A normal number pattern
a, b = split_number(bankersround(abs(value),
self.frac_prec[1]))
b = b or '0'
a = self._format_int(a, self.int_prec[0],
self.int_prec[1], locale)
b = self._format_frac(b, locale)
number = a + b
retval = u'%s%s%s' % (self.prefix[is_negative], number,
self.suffix[is_negative])
if u'¤' in retval:
retval = retval.replace(u'¤¤', currency.upper())
retval = retval.replace(u'¤', get_currency_symbol(currency, locale))
return retval
def _format_sigdig(self, value, min, max):
"""Convert value to a string.
The resulting string will contain between (min, max) number of
significant digits.
"""
a, b = split_number(value)
ndecimals = len(a)
if a == '0' and b != '':
ndecimals = 0
while b.startswith('0'):
b = b[1:]
ndecimals -= 1
a, b = split_number(bankersround(value, max - ndecimals))
digits = len((a + b).lstrip('0'))
if not digits:
digits = 1
# Figure out if we need to add any trailing '0':s
if len(a) >= max and a != '0':
return a
if digits < min:
b += ('0' * (min - digits))
if b:
return '%s.%s' % (a, b)
return a
def _format_int(self, value, min, max, locale):
width = len(value)
if width < min:
value = '0' * (min - width) + value
gsize = self.grouping[0]
ret = ''
symbol = get_group_symbol(locale)
while len(value) > gsize:
ret = symbol + value[-gsize:] + ret
value = value[:-gsize]
gsize = self.grouping[1]
return value + ret
def _format_frac(self, value, locale):
min, max = self.frac_prec
if len(value) < min:
value += ('0' * (min - len(value)))
if max == 0 or (min == 0 and int(value) == 0):
return ''
width = len(value)
while len(value) > min and value[-1] == '0':
value = value[:-1]
return get_decimal_symbol(locale) + value
|
the-stack_0_13207 | from heapq import *
from typing import List, Union, Tuple
import numpy as np
from skimage.draw import line as skline
from seedpod_ground_risk.pathfinding.algorithm import Algorithm
from seedpod_ground_risk.pathfinding.environment import GridEnvironment, Node
from seedpod_ground_risk.pathfinding.heuristic import Heuristic, ManhattanHeuristic
def _reconstruct_path(end: Node, grid: np.ndarray, smooth=True) -> List[Node]:
reverse_path = []
reverse_path_append = reverse_path.append
reverse_path_append(end)
node = end
while node is not None:
reverse_path_append(node)
if node.parent is None:
break
if node == node.parent:
reverse_path_append(node.parent)
break
node = node.parent
path = list(reversed(reverse_path))
if not smooth:
return path
def get_path_sum(nx, ny, tx, ty, grid):
line = skline(nx, ny, tx, ty)
line_points = grid[line[0], line[1]]
# If the new line crosses any blocked areas the cost is inf
if -1 in line_points:
return np.inf
else:
return line_points.sum()
def jump_path(node: Node, path, grid, goal: Node):
ny, nx = node.position
gy, gx = goal.position
if get_path_sum(nx, ny, gx, gy, grid) == 0:
return goal
start_node_index = path.index(node)
next_node_index = start_node_index + 1
for test_node_index in reversed(range(len(path))):
# Ensure still looking forward from start node
if test_node_index > next_node_index:
ty, tx = path[test_node_index].position
path_x = [p.position[1] for p in path[start_node_index:test_node_index]]
path_y = [p.position[0] for p in path[start_node_index:test_node_index]]
existing_path_sum = grid[path_y, path_x].sum()
test_path_sum = get_path_sum(nx, ny, tx, ty, grid)
if test_path_sum <= existing_path_sum:
return path[test_node_index]
return path[next_node_index]
    simplified_path = []
    next_node = path[0]
    simplified_path.append(next_node)
    while next_node != end:
        jump_node = jump_path(next_node, path, grid, end)
        simplified_path.append(jump_node)
        next_node = jump_node
    return simplified_path
class GridAStar(Algorithm):
def __init__(self, heuristic: Heuristic = ManhattanHeuristic()):
self.heuristic = heuristic.h
def find_path(self, environment: GridEnvironment, start: Node, end: Node) -> Union[
List[Node], None]:
pass
# Canonical algorithm from literature
class RiskAStar(Algorithm):
def find_path(self, environment: GridEnvironment, start: Node, end: Node, k=0.9, smooth=True, **kwargs) -> Union[
List[Node], None]:
grid = environment.grid
min_dist = 2 ** 0.5
goal_val = grid[end.position]
        # Use heapq; the thread safety provided by PriorityQueue is not needed, as we only execute on a single thread
open = [start]
start.f = start.g = start.h = 0
open_cost = {start: start.f}
closed = set()
while open:
node = heappop(open)
if node in open_cost:
open_cost.pop(node)
if node in closed:
continue
closed.add(node)
if node == end:
return _reconstruct_path(node, grid, smooth=smooth)
current_cost = node.f
node_val = grid[node.position]
for neighbour in environment.get_neighbours(node):
cost = current_cost \
+ (((grid[neighbour.position] + node_val) / 2)
* (((node.position[1] - neighbour.position[1]) ** 2 + (
node.position[0] - neighbour.position[0]) ** 2) ** 0.5))
if cost < neighbour.g:
neighbour.g = cost
dist = ((node.position[1] - end.position[1]) ** 2 + (
node.position[0] - end.position[0]) ** 2) ** 0.5
line = skline(node.position[1], node.position[0], end.position[1], end.position[0])
min_val = grid[line[0], line[1]].min()
node_val = grid[node.position]
h = k * ((((node_val + goal_val) / 2) * min_dist) + ((dist - min_dist) * min_val))
# h = self.heuristic(neighbour.position, end.position)
neighbour.h = h
neighbour.f = cost + h
neighbour.parent = node
if neighbour not in open_cost or neighbour.f < open_cost[neighbour]:
heappush(open, neighbour)
open_cost[neighbour] = neighbour.f
return None
class RiskGridAStar(GridAStar):
def find_path(self, environment: GridEnvironment, start: Node, end: Node, k=1, smooth=True, **kwargs) -> Union[
List[Node], None]:
grid = environment.grid
        # Use heapq; the thread safety provided by PriorityQueue is not needed, as we only execute on a single thread
open = [start]
start.f = start.g = start.h = 0
open_cost = {start: start.f}
closed = set()
while open:
node = heappop(open)
if node in open_cost:
open_cost.pop(node)
if node in closed:
continue
closed.add(node)
if node == end:
return _reconstruct_path(node, grid, smooth=smooth)
current_cost = node.f
for neighbour in environment.get_neighbours(node):
cost = current_cost + grid[neighbour.position]
if cost < neighbour.g:
neighbour.g = cost
h = abs((node.position[0] - end.position[0])) + abs((node.position[1] - end.position[1]))
neighbour.h = h
neighbour.f = cost + (k * h)
neighbour.parent = node
if neighbour not in open_cost or neighbour.f < open_cost[neighbour]:
heappush(open, neighbour)
open_cost[neighbour] = neighbour.f
return None
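# Illustrative usage sketch (not part of the original module). The
# GridEnvironment constructor arguments are assumed from how the class is used
# above (a 2D risk grid plus a diagonals flag):
#
#     env = GridEnvironment(risk_grid, diagonals=False)
#     path = RiskGridAStar().find_path(env, Node((0, 0)), Node((50, 80)))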
class JumpPointSearchAStar(GridAStar):
def find_path(self, environment: GridEnvironment, start: Node, end: Node) -> Union[
List[Node], None]:
if not environment.diagonals:
raise ValueError('JPS relies on a grid environment with diagonals')
self.environment = environment
grid = environment.grid
self._max_y, self._max_x = self.environment.grid.shape[0] - 1, self.environment.grid.shape[1] - 1
self.goal = end
        # Use heapq; the thread safety provided by PriorityQueue is not needed, as we only execute on a single thread
open = [start]
start.f = start.g = start.h = 0
open_cost = {start: start.f}
closed = set()
while open:
node = heappop(open)
open_cost.pop(node)
if node in closed:
continue
closed.add(node)
if node == end:
return _reconstruct_path(end, grid)
current_cost = node.f
cy, cx = node.position
successors = []
for neighbour in environment.get_neighbours(node):
dx, dy = neighbour.position[1] - cx, neighbour.position[0] - cy
jump_point = self._jump(cy, cx, dy, dx)
if jump_point:
successors.append(Node(jump_point))
for successor in successors:
cost = current_cost + grid[successor.position]
if cost < successor.g:
successor.g = cost
h = self.heuristic(successor.position, end.position)
successor.h = h
successor.f = h + cost
if successor not in open_cost or successor.f < open_cost[successor]:
heappush(open, successor)
open_cost[successor] = successor.f
return None
def _jump(self, cy: int, cx: int, dy: int, dx: int) -> Tuple[int, int]:
ny, nx = cy + dy, cx + dx
if not self._is_passable(ny, nx):
return None
        if nx == self.goal.position[1] and ny == self.goal.position[0]:
return ny, nx
if dx and dy:
# Diagonal case
            if (self._is_passable(ny + dy, nx - dx) and not self._is_passable(ny, nx - dx)) or \
                    (self._is_passable(ny - dy, nx + dx) and not self._is_passable(ny - dy, nx)):
return ny, nx
# Orthogonal searches
if self._jump(ny, nx, dy, 0) or self._jump(ny, nx, 0, dx):
return ny, nx
else:
# Orthogonal case
if dx:
                if (self._is_passable(ny + 1, nx + dx) and not self._is_passable(ny + 1, nx)) or \
                        (self._is_passable(ny - 1, nx + dx) and not self._is_passable(ny - 1, nx)):
return ny, nx
else: # dy
                if (self._is_passable(ny + dy, nx + 1) and not self._is_passable(ny, nx + 1)) or \
                        (self._is_passable(ny + dy, nx - 1) and not self._is_passable(ny, nx - 1)):
return ny, nx
return self._jump(ny, nx, dy, dx)
def _is_passable(self, y, x):
if y < 0 or y > self._max_y or x < 0 or x > self._max_x:
return False
return self.environment.grid[y, x] > -1
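# Illustrative note (not in the original source): unlike the planners above,
# JumpPointSearchAStar requires an environment built with diagonal moves
# enabled, otherwise find_path raises the ValueError shown above, e.g.
#
#     env = GridEnvironment(risk_grid, diagonals=True)   # constructor assumed
#     path = JumpPointSearchAStar().find_path(env, start, end)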
|
the-stack_0_13212 | from math import radians
import numpy as np
import geopandas as gpd
import pandas as pd
from shapely.geometry import Point
from sklearn.cluster import DBSCAN
from tqdm import tqdm
from trackintel.geogr.distances import meters_to_decimal_degrees
def generate_locations(
staypoints,
method="dbscan",
epsilon=100,
num_samples=1,
distance_metric="haversine",
agg_level="user",
print_progress=False,
):
"""
Generate locations from the staypoints.
Parameters
----------
staypoints : GeoDataFrame (as trackintel staypoints)
The staypoints have to follow the standard definition for staypoints DataFrames.
method : {'dbscan'}
Method to create locations.
- 'dbscan' : Uses the DBSCAN algorithm to cluster staypoints.
epsilon : float, default 100
The epsilon for the 'dbscan' method. if 'distance_metric' is 'haversine'
or 'euclidean', the unit is in meters.
num_samples : int, default 1
The minimal number of samples in a cluster.
distance_metric: {'haversine', 'euclidean'}
The distance metric used by the applied method. Any mentioned below are possible:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html
agg_level: {'user','dataset'}
The level of aggregation when generating locations:
- 'user' : locations are generated independently per-user.
- 'dataset' : shared locations are generated for all users.
print_progress : bool, default False
If print_progress is True, the progress bar is displayed
Returns
-------
ret_sp: GeoDataFrame (as trackintel staypoints)
The original staypoints with a new column ``[`location_id`]``.
ret_loc: GeoDataFrame (as trackintel locations)
The generated locations.
Examples
--------
>>> stps.as_staypoints.generate_locations(method='dbscan', epsilon=100, num_samples=1)
"""
if agg_level not in ["user", "dataset"]:
raise AttributeError("The parameter agg_level must be one of ['user', 'dataset'].")
if method not in ["dbscan"]:
raise AttributeError("The parameter method must be one of ['dbscan'].")
# initialize the return GeoDataFrames
ret_stps = staypoints.copy()
ret_stps = ret_stps.sort_values(["user_id", "started_at"])
geo_col = ret_stps.geometry.name
if method == "dbscan":
if distance_metric == "haversine":
            # The input and output of sklearn's haversine metric are both in radians,
            # see https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.haversine_distances.html
            # here the 'epsilon' is directly applied to the metric's output.
            # convert epsilon from meters to radians by dividing by the Earth radius
db = DBSCAN(eps=epsilon / 6371000, min_samples=num_samples, algorithm="ball_tree", metric=distance_metric)
else:
db = DBSCAN(eps=epsilon, min_samples=num_samples, algorithm="ball_tree", metric=distance_metric)
if agg_level == "user":
if print_progress:
tqdm.pandas(desc="User location generation")
ret_stps = ret_stps.groupby("user_id", as_index=False).progress_apply(
_generate_locations_per_user,
geo_col=geo_col,
distance_metric=distance_metric,
db=db,
)
else:
ret_stps = ret_stps.groupby("user_id", as_index=False).apply(
_generate_locations_per_user,
geo_col=geo_col,
distance_metric=distance_metric,
db=db,
)
# keeping track of noise labels
ret_stps_non_noise_labels = ret_stps[ret_stps["location_id"] != -1]
ret_stps_noise_labels = ret_stps[ret_stps["location_id"] == -1]
# sort so that the last location id of a user = max(location id)
ret_stps_non_noise_labels = ret_stps_non_noise_labels.sort_values(["user_id", "location_id"])
# identify start positions of new user_ids
start_of_user_id = ret_stps_non_noise_labels["user_id"] != ret_stps_non_noise_labels["user_id"].shift(1)
# calculate the offset (= last location id of the previous user)
# multiplication is to mask all positions where no new user starts and addition is to have a +1 when a
# new user starts
loc_id_offset = ret_stps_non_noise_labels["location_id"].shift(1) * start_of_user_id + start_of_user_id
# fill first nan with 0 and create the cumulative sum
loc_id_offset = loc_id_offset.fillna(0).cumsum()
ret_stps_non_noise_labels["location_id"] = ret_stps_non_noise_labels["location_id"] + loc_id_offset
ret_stps = gpd.GeoDataFrame(pd.concat([ret_stps_non_noise_labels, ret_stps_noise_labels]), geometry=geo_col)
ret_stps.sort_values(["user_id", "started_at"], inplace=True)
else:
if distance_metric == "haversine":
# the input is converted to list of (lat, lon) tuples in radians unit
p = np.array([[radians(g.y), radians(g.x)] for g in ret_stps.geometry])
else:
p = np.array([[g.x, g.y] for g in ret_stps.geometry])
labels = db.fit_predict(p)
ret_stps["location_id"] = labels
### create locations as grouped staypoints
temp_sp = ret_stps[["user_id", "location_id", ret_stps.geometry.name]]
if agg_level == "user":
# directly dissolve by 'user_id' and 'location_id'
ret_loc = temp_sp.dissolve(by=["user_id", "location_id"], as_index=False)
else:
## generate user-location pairs with same geometries across users
# get user-location pairs
ret_loc = temp_sp.dissolve(by=["user_id", "location_id"], as_index=False).drop(
columns={temp_sp.geometry.name}
)
# get location geometries
geom_df = temp_sp.dissolve(by=["location_id"], as_index=False).drop(columns={"user_id"})
# merge pairs with location geometries
ret_loc = ret_loc.merge(geom_df, on="location_id", how="left")
# filter stps not belonging to locations
ret_loc = ret_loc.loc[ret_loc["location_id"] != -1]
ret_loc["center"] = None # initialize
# locations with only one staypoints is of type "Point"
point_idx = ret_loc.geom_type == "Point"
if not ret_loc.loc[point_idx].empty:
ret_loc.loc[point_idx, "center"] = ret_loc.loc[point_idx, ret_loc.geometry.name]
# locations with multiple staypoints is of type "MultiPoint"
if not ret_loc.loc[~point_idx].empty:
ret_loc.loc[~point_idx, "center"] = ret_loc.loc[~point_idx, ret_loc.geometry.name].apply(
lambda p: Point(np.array(p)[:, 0].mean(), np.array(p)[:, 1].mean())
)
# extent is the convex hull of the geometry
ret_loc["extent"] = None # initialize
if not ret_loc.empty:
ret_loc["extent"] = ret_loc[ret_loc.geometry.name].apply(lambda p: p.convex_hull)
# convex_hull of one point would be a Point and two points a Linestring,
# we change them into Polygon by creating a buffer of epsilon around them.
pointLine_idx = (ret_loc["extent"].geom_type == "LineString") | (ret_loc["extent"].geom_type == "Point")
if not ret_loc.loc[pointLine_idx].empty:
# Perform meter to decimal conversion if the distance metric is haversine
if distance_metric == "haversine":
ret_loc.loc[pointLine_idx, "extent"] = ret_loc.loc[pointLine_idx].apply(
lambda p: p["extent"].buffer(meters_to_decimal_degrees(epsilon, p["center"].y)), axis=1
)
else:
ret_loc.loc[pointLine_idx, "extent"] = ret_loc.loc[pointLine_idx].apply(
lambda p: p["extent"].buffer(epsilon), axis=1
)
ret_loc = ret_loc.set_geometry("center")
ret_loc = ret_loc[["user_id", "location_id", "center", "extent"]]
# index management
ret_loc.rename(columns={"location_id": "id"}, inplace=True)
ret_loc.set_index("id", inplace=True)
# stps not linked to a location receive np.nan in 'location_id'
ret_stps.loc[ret_stps["location_id"] == -1, "location_id"] = np.nan
## dtype consistency
# locs id (generated by this function) should be int64
ret_loc.index = ret_loc.index.astype("int64")
# location_id of stps can only be in Int64 (missing values)
ret_stps["location_id"] = ret_stps["location_id"].astype("Int64")
# user_id of ret_loc should be the same as ret_stps
ret_loc["user_id"] = ret_loc["user_id"].astype(ret_stps["user_id"].dtype)
return ret_stps, ret_loc
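# Illustrative usage sketch (not part of the original module): `stps` is
# assumed to be a trackintel staypoints GeoDataFrame with the columns used
# above (user_id, started_at, geometry).
#
#     stps, locs = generate_locations(stps, method="dbscan", epsilon=100,
#                                     num_samples=2, agg_level="user")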
def _generate_locations_per_user(user_staypoints, distance_metric, db, geo_col):
"""function called after groupby: should only contain records of one user;
see generate_locations() function for parameter meaning."""
if distance_metric == "haversine":
# the input is converted to list of (lat, lon) tuples in radians unit
p = np.array([[radians(q.y), radians(q.x)] for q in (user_staypoints[geo_col])])
else:
p = np.array([[q.x, q.y] for q in (user_staypoints[geo_col])])
labels = db.fit_predict(p)
# add staypoint - location matching to original staypoints
user_staypoints["location_id"] = labels
user_staypoints = gpd.GeoDataFrame(user_staypoints, geometry=geo_col)
return user_staypoints
|
the-stack_0_13214 | import os
import sys
sys.path.append("../../../../monk_v1/");
sys.path.append("../../../monk/");
import psutil
from keras_prototype import prototype
from compare_prototype import compare
from common import print_start
from common import print_status
import tensorflow as tf
if(tf.__version__[0] == '2'):
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
def test_layer_average_pooling1d(system_dict):
forward = True;
test = "test_layer_average_pooling1d";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
gtf = prototype(verbose=0);
gtf.Prototype("sample-project-1", "sample-experiment-1");
network = [];
network.append(gtf.average_pooling1d(kernel_size=3));
gtf.Compile_Network(network, data_shape=(3, 32), use_gpu=False);
x = tf.placeholder(tf.float32, shape=(1, 32, 3))
y = gtf.system_dict["local"]["model"](x);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
return system_dict
|
the-stack_0_13215 | from fontTools.misc.py23 import bytesjoin, strjoin, tobytes, tostr
from fontTools.misc.textTools import safeEval
from fontTools.misc import sstruct
from . import DefaultTable
import base64
DSIG_HeaderFormat = """
> # big endian
ulVersion: L
usNumSigs: H
usFlag: H
"""
# followed by an array of usNumSigs DSIG_Signature records
DSIG_SignatureFormat = """
> # big endian
ulFormat: L
ulLength: L # length includes DSIG_SignatureBlock header
ulOffset: L
"""
# followed by an array of usNumSigs DSIG_SignatureBlock records,
# each followed immediately by the pkcs7 bytes
DSIG_SignatureBlockFormat = """
> # big endian
usReserved1: H
usReserved2: H
cbSignature: l # length of following raw pkcs7 data
"""
#
# NOTE
# the DSIG table format allows for SignatureBlocks residing
# anywhere in the table and possibly in a different order as
# listed in the array after the first table header
#
# this implementation does not keep track of any gaps and/or data
# before or after the actual signature blocks while decompiling,
# and puts them in the same physical order as listed in the header
# on compilation with no padding whatsoever.
#
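# Illustrative usage sketch (not part of the original file): inspecting the
# signature records of a font with fontTools; the font path is a placeholder.
#
#     from fontTools.ttLib import TTFont
#     font = TTFont("example.ttf")
#     if "DSIG" in font:
#         for rec in font["DSIG"].signatureRecords:
#             print(rec.ulFormat, len(rec.pkcs7))
#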
class table_D_S_I_G_(DefaultTable.DefaultTable):
def decompile(self, data, ttFont):
dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self)
assert self.ulVersion == 1, "DSIG ulVersion must be 1"
assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0"
self.signatureRecords = sigrecs = []
for n in range(self.usNumSigs):
sigrec, newData = sstruct.unpack2(DSIG_SignatureFormat, newData, SignatureRecord())
assert sigrec.ulFormat == 1, "DSIG signature record #%d ulFormat must be 1" % n
sigrecs.append(sigrec)
for sigrec in sigrecs:
dummy, newData = sstruct.unpack2(DSIG_SignatureBlockFormat, data[sigrec.ulOffset:], sigrec)
assert sigrec.usReserved1 == 0, "DSIG signature record #%d usReserverd1 must be 0" % n
assert sigrec.usReserved2 == 0, "DSIG signature record #%d usReserverd2 must be 0" % n
sigrec.pkcs7 = newData[:sigrec.cbSignature]
def compile(self, ttFont):
packed = sstruct.pack(DSIG_HeaderFormat, self)
headers = [packed]
offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat)
data = []
for sigrec in self.signatureRecords:
# first pack signature block
sigrec.cbSignature = len(sigrec.pkcs7)
packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7
data.append(packed)
# update redundant length field
sigrec.ulLength = len(packed)
# update running table offset
sigrec.ulOffset = offset
headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec))
offset += sigrec.ulLength
if offset % 2:
# Pad to even bytes
data.append(b'\0')
return bytesjoin(headers+data)
def toXML(self, xmlWriter, ttFont):
xmlWriter.comment("note that the Digital Signature will be invalid after recompilation!")
xmlWriter.newline()
xmlWriter.simpletag("tableHeader", version=self.ulVersion, numSigs=self.usNumSigs, flag="0x%X" % self.usFlag)
for sigrec in self.signatureRecords:
xmlWriter.newline()
sigrec.toXML(xmlWriter, ttFont)
xmlWriter.newline()
def fromXML(self, name, attrs, content, ttFont):
if name == "tableHeader":
self.signatureRecords = []
self.ulVersion = safeEval(attrs["version"])
self.usNumSigs = safeEval(attrs["numSigs"])
self.usFlag = safeEval(attrs["flag"])
return
if name == "SignatureRecord":
sigrec = SignatureRecord()
sigrec.fromXML(name, attrs, content, ttFont)
self.signatureRecords.append(sigrec)
pem_spam = lambda l, spam = {
"-----BEGIN PKCS7-----": True, "-----END PKCS7-----": True, "": True
}: not spam.get(l.strip())
def b64encode(b):
s = base64.b64encode(b)
# Line-break at 76 chars.
items = []
while s:
items.append(tostr(s[:76]))
items.append('\n')
s = s[76:]
return strjoin(items)
class SignatureRecord(object):
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.__dict__)
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, format=self.ulFormat)
writer.newline()
writer.write_noindent("-----BEGIN PKCS7-----\n")
writer.write_noindent(b64encode(self.pkcs7))
writer.write_noindent("-----END PKCS7-----\n")
writer.endtag(self.__class__.__name__)
def fromXML(self, name, attrs, content, ttFont):
self.ulFormat = safeEval(attrs["format"])
self.usReserved1 = safeEval(attrs.get("reserved1", "0"))
self.usReserved2 = safeEval(attrs.get("reserved2", "0"))
self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content))))
|
the-stack_0_13218 | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def detail_url(recipe_id):
"""Return recipe details URL"""
return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main course'):
"""Create nd returrn a sample tag"""
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
"""Create and return a sample ingredient"""
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
"""Create and return a sample recipe"""
defaults = {
'title': 'Simple recipe',
'time_minutes': 10,
'price': 5.00
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
class PublicRecipeApiTests(TestCase):
"""Test unauthenticated recie API access"""
def setUp(self):
self.client = APIClient()
def test_auth_required(self):
"""Test unauthenticated recipe API access"""
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
"""Test unauthenticated recipe API access"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_recipes(self):
"""Test retrieving a list of recipes"""
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
"""Test retrieving recipes for user"""
user2 = get_user_model().objects.create_user(
'[email protected]',
'password123'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
"""Test viewing a recipe detail"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
"""Test creating recipe"""
payload = {
'title': 'Chocolate cheesecake',
'time_minutes': 30,
'price': 5.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
for key in payload.keys():
self.assertEqual(payload[key], getattr(recipe, key))
def test_create_recipe_with_tags(self):
"""Test creating a recipe with tags"""
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Dessert')
payload = {
'title': 'Avocado lime cheesecake',
'tags': [tag1.id, tag2.id],
'time_minutes': 60,
'price': 20.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_with_ingredients(self):
"""Test creating recipe with ingredients"""
ingredient1 = sample_ingredient(user=self.user, name='Prawns')
ingredient2 = sample_ingredient(user=self.user, name='Ginger')
payload = {
'title': 'Thai prawn red curry',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 20,
'price': 7.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
def test_partial_update_recipe(self):
"""Test updating a recipe with patch"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user=self.user, name='Curry')
payload = {'title': 'Chicken tikka', 'tags': [new_tag.id]}
url = detail_url(recipe.id)
self.client.patch(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
    def test_full_update_recipe(self):
"""Test updating a recipe with put"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
payload = {
'title': 'Spaghetti carbonara',
'time_minutes': 25,
'price': 5.00
}
url = detail_url(recipe.id)
self.client.put(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
|
the-stack_0_13219 | from sqlalchemy import desc, func
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import and_
from app.models import Fund, Holding, News, Trades
def get_etf_profile(db: Session, symbol: str):
return db.query(Fund).filter(Fund.symbol == symbol).one()
def get_etf_current_holdings(db: Session, symbols: str, limit: int):
subq = (
db.query(
Holding.fund,
func.max(Holding.date).label("maxdate"),
)
.filter(
Holding.fund.in_([s for s in symbols]),
)
.group_by(Holding.fund)
.subquery()
)
q = (
db.query(
Holding.fund,
Holding.date,
Holding.ticker,
Holding.company,
Holding.cusip,
Holding.shares,
Holding.market_value,
Holding.share_price,
Holding.weight,
Holding.weight_rank,
)
.join(
subq,
and_(
Holding.fund == subq.c.fund,
Holding.date == subq.c.maxdate,
),
)
.order_by(
Holding.date,
Holding.fund,
Holding.weight_rank,
)
)
if limit:
return q.order_by("date", "weight_rank").limit(limit).all()
else:
return q.all()
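# Illustrative usage sketch (not part of the original module): these helpers
# expect an open SQLAlchemy Session; `SessionLocal` is a hypothetical session
# factory defined elsewhere in the app.
#
#     with SessionLocal() as db:
#         rows = get_etf_current_holdings(db, symbols=["ARKK"], limit=10)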
def get_etf_holdings(
db: Session, symbols: str, date_from: str, date_to: str, limit: int
):
q = (
db.query(
Holding.fund,
Holding.date,
Holding.ticker,
Holding.company,
Holding.cusip,
Holding.shares,
Holding.market_value,
Holding.share_price,
Holding.weight,
Holding.weight_rank,
)
.filter(
Holding.fund.in_([s for s in symbols]),
Holding.date >= date_from,
Holding.date <= date_to,
)
.order_by(
Holding.date,
Holding.fund,
Holding.weight_rank,
)
)
if limit:
return q.order_by("date", "weight_rank").limit(limit).all()
else:
return q.all()
def get_etf_holdings_dates(db: Session, symbols: str):
return (
db.query(
func.max(Holding.date).label("maxdate"),
)
.filter(Holding.fund.in_([s for s in symbols]))
.group_by(Holding.fund)
.all()
)
def get_etf_trades(
db: Session, symbols: str, start_date: str, end_date: str, limit: int
):
q = (
db.query(
Trades.fund,
Trades.date,
Trades.ticker,
Trades.company,
Trades.direction,
Trades.cusip,
Trades.shares,
Trades.etf_percent,
)
.filter(
Trades.fund.in_([s for s in symbols]),
Trades.date >= start_date,
Trades.date <= end_date,
)
.order_by(
Trades.date,
Trades.fund,
Trades.etf_percent.desc(),
)
)
if limit:
return q.limit(limit).all()
else:
return q.all()
def get_etf_trades_dates(db: Session, symbols: str):
return (
db.query(
func.min(Trades.date).label("mindate"),
func.max(Trades.date).label("maxdate"),
)
.filter(Trades.fund.in_([s for s in symbols]))
.one()
)
def get_etf_trades_maxdate(db: Session):
return (
db.query(
func.max(Trades.date).label("maxdate"),
).one()
)[0]
def get_stock_fundownership_distinct_dates(
db: Session, symbol: str, date_from: str, date_to: str
):
return (
db.query(Holding.date)
.filter(
Holding.ticker == symbol,
Holding.date >= date_from,
Holding.date <= date_to,
)
.distinct()
)
def get_stock_fundownership(db: Session, symbol: str, date: str):
return (
db.query(Holding)
.filter(
Holding.ticker == symbol,
Holding.date == date,
)
.all()
)
def get_stock_fundownership_dates(db: Session, symbol: str):
return (
db.query(
func.min(Holding.date).label("mindate"),
func.max(Holding.date).label("maxdate"),
)
.filter(Holding.ticker == symbol)
.first()
)
def get_stock_trades(
db: Session, symbol: str, direction: str, date_from: str, date_to: str, limit: int
):
if direction:
q = (
db.query(Trades)
.filter(
Trades.ticker == symbol,
Trades.direction == direction.capitalize(),
Trades.date >= date_from,
Trades.date <= date_to,
)
.order_by(Trades.date.desc(), Trades.fund)
)
else:
q = (
db.query(Trades)
.filter(
Trades.ticker == symbol,
Trades.date >= date_from,
Trades.date <= date_to,
)
.order_by(Trades.date.desc(), Trades.fund)
)
if limit:
return q.limit(limit).all()
else:
return q.all()
def get_stock_trades_dates(db: Session, symbol: str):
return (
db.query(
func.min(Trades.date).label("mindate"),
func.max(Trades.date).label("maxdate"),
)
.filter(Trades.ticker == symbol)
.one()
)
def get_etf_news(db: Session, symbols: str, date_from: str, date_to: str, limit: int):
return (
db.query(News)
.filter(
News.category == "etf",
News.datetime >= date_from,
News.datetime <= date_to,
News.related.in_([s for s in symbols]),
)
.order_by(desc("datetime"))
.limit(limit)
.all()
)
def get_etf_news_min_date(db: Session, symbols: str):
return (
db.query(
func.min(News.datetime).label("mindate"),
)
.filter(
News.related.in_([s for s in symbols]),
News.category == "etf",
)
.one()
)[0]
|
the-stack_0_13220 | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import enum
import math
import re
import warnings
from collections import OrderedDict
from copy import copy
from functools import partial
from pathlib import Path
class ConfigError(ValueError):
pass
class BaseValidator:
def __init__(self, on_error=None, additional_validator=None):
self.on_error = on_error
self.additional_validator = additional_validator
self.field_uri = None
def validate(self, entry, field_uri=None):
field_uri = field_uri or self.field_uri
if self.additional_validator and not self.additional_validator(entry, field_uri):
self.raise_error(entry, field_uri)
def raise_error(self, value, field_uri, reason=None):
if self.on_error:
self.on_error(value, field_uri, reason)
error_message = 'Invalid value "{value}" for {field_uri}'.format(value=value, field_uri=field_uri)
if reason:
error_message = '{error_message}: {reason}'.format(error_message=error_message, reason=reason)
raise ConfigError(error_message.format(value, field_uri))
class _ExtraArgumentBehaviour(enum.Enum):
WARN = 'warn'
IGNORE = 'ignore'
ERROR = 'error'
_WARN_ON_EXTRA_ARGUMENT = _ExtraArgumentBehaviour.WARN
_ERROR_ON_EXTRA_ARGUMENT = _ExtraArgumentBehaviour.ERROR
_IGNORE_ON_EXTRA_ARGUMENT = _ExtraArgumentBehaviour.IGNORE
def _is_dict_like(entry):
return hasattr(entry, '__iter__') and hasattr(entry, '__getitem__')
class ConfigValidator(BaseValidator):
WARN_ON_EXTRA_ARGUMENT = _WARN_ON_EXTRA_ARGUMENT
ERROR_ON_EXTRA_ARGUMENT = _ERROR_ON_EXTRA_ARGUMENT
IGNORE_ON_EXTRA_ARGUMENT = _IGNORE_ON_EXTRA_ARGUMENT
def __init__(self, config_uri, on_extra_argument=_WARN_ON_EXTRA_ARGUMENT, **kwargs):
super().__init__(**kwargs)
self.on_extra_argument = _ExtraArgumentBehaviour(on_extra_argument)
self._fields = OrderedDict()
self.field_uri = config_uri
for name in dir(self):
value = getattr(self, name)
if not isinstance(value, BaseField):
continue
field_copy = copy(value)
field_copy.field_uri = "{}.{}".format(config_uri, name)
self._fields[name] = field_copy
def validate(self, entry, field_uri=None):
super().validate(entry, field_uri)
field_uri = field_uri or self.field_uri
if not _is_dict_like(entry):
raise ConfigError("{} is expected to be dict-like".format(field_uri))
extra_arguments = []
for key in entry:
if key not in self._fields:
extra_arguments.append(key)
continue
self._fields[key].validate(entry[key])
required_fields = set(field_name for field_name, field_value in self._fields.items()
if not field_value.optional)
missing_arguments = required_fields.difference(entry)
if missing_arguments:
self.raise_error(entry, field_uri,
"Invalid config for {}: missing required fields: {}".format(field_uri, ', '.join(
map(str, missing_arguments))))
if extra_arguments:
unknown_options_error = "specifies unknown options: {}".format(extra_arguments)
message = "{} {}".format(field_uri, unknown_options_error)
if self.on_extra_argument == _ExtraArgumentBehaviour.WARN:
warnings.warn(message)
if self.on_extra_argument == _ExtraArgumentBehaviour.ERROR:
self.raise_error(entry, field_uri, message)
@property
def known_fields(self):
return set(self._fields)
def raise_error(self, value, field_uri, reason=None):
if self.on_error:
self.on_error(value, field_uri, reason)
else:
raise ConfigError(reason)
class BaseField(BaseValidator):
def __init__(self, optional=False, allow_none=False, **kwargs):
super().__init__(**kwargs)
self.optional = optional
self.allow_none = allow_none
def validate(self, entry, field_uri=None):
super().validate(entry, field_uri)
field_uri = field_uri or self.field_uri
if not self.allow_none and entry is None:
raise ConfigError("{} is not allowed to be None".format(field_uri))
class StringField(BaseField):
def __init__(self, choices=None, regex=None, case_sensitive=False, **kwargs):
super().__init__(**kwargs)
self.choices = choices if case_sensitive or not choices else list(map(str.lower, choices))
self.regex = re.compile(regex, flags=re.IGNORECASE if not case_sensitive else 0) if regex else None
self.case_sensitive = case_sensitive
def validate(self, entry, field_uri=None):
super().validate(entry, field_uri)
if entry is None:
return
field_uri = field_uri or self.field_uri
source_entry = entry
if not isinstance(entry, str):
raise ConfigError("{} is expected to be str".format(source_entry))
if not self.case_sensitive:
entry = entry.lower()
if self.choices and entry not in self.choices:
reason = "unsupported option, expected one of: {}".format(', '.join(map(str, self.choices)))
self.raise_error(source_entry, field_uri, reason)
if self.regex and not self.regex.match(entry):
self.raise_error(source_entry, field_uri, reason=None)
class DictField(BaseField):
def __init__(self, key_type=None, value_type=None, validate_keys=True, validate_values=True, allow_empty=True,
**kwargs):
super().__init__(**kwargs)
self.validate_keys = validate_keys if key_type else False
self.validate_values = validate_values if value_type else False
self.key_type = _get_field_type(key_type)
self.value_type = _get_field_type(value_type)
self.allow_empty = allow_empty
def validate(self, entry, field_uri=None):
super().validate(entry, field_uri)
if entry is None:
return
field_uri = field_uri or self.field_uri
if not isinstance(entry, dict):
raise ConfigError("{} is expected to be dict".format(field_uri))
if not entry and not self.allow_empty:
self.raise_error(entry, field_uri, "value is empty")
for k, v in entry.items():
if self.validate_keys:
uri = "{}.keys.{}".format(field_uri, k)
self.key_type.validate(k, uri)
if self.validate_values:
uri = "{}.{}".format(field_uri, k)
self.value_type.validate(v, uri)
class ListField(BaseField):
def __init__(self, value_type=None, validate_values=True, allow_empty=True, **kwargs):
super().__init__(**kwargs)
self.validate_values = validate_values if value_type else False
self.value_type = _get_field_type(value_type)
self.allow_empty = allow_empty
def validate(self, entry, field_uri=None):
super().validate(entry, field_uri)
if entry is None:
return
if not isinstance(entry, list):
raise ConfigError("{} is expected to be list".format(field_uri))
if not entry and not self.allow_empty:
self.raise_error(entry, field_uri, "value is empty")
if self.validate_values:
for i, val in enumerate(entry):
self.value_type.validate(val, "{}[{}]".format(val, i))
class NumberField(BaseField):
def __init__(self, floats=True, min_value=None, max_value=None, allow_inf=False, allow_nan=False, **kwargs):
super().__init__(**kwargs)
self.floats = floats
self.min = min_value
self.max = max_value
self.allow_inf = allow_inf
self.allow_nan = allow_nan
def validate(self, entry, field_uri=None):
super().validate(entry, field_uri)
if entry is None:
return
field_uri = field_uri or self.field_uri
if not self.floats and isinstance(entry, float):
raise ConfigError("{} is expected to be int".format(field_uri))
if not isinstance(entry, int) and not isinstance(entry, float):
raise ConfigError("{} is expected to be number".format(field_uri))
if self.min is not None and entry < self.min:
reason = "value is less than minimal allowed - {}".format(self.min)
self.raise_error(entry, field_uri, reason)
if self.max is not None and entry > self.max:
reason = "value is greater than maximal allowed - {}".format(self.max)
self.raise_error(entry, field_uri, reason)
if math.isinf(entry) and not self.allow_inf:
self.raise_error(entry, field_uri, "value is infinity")
if math.isnan(entry) and not self.allow_nan:
self.raise_error(entry, field_uri, "value is NaN")
class PathField(BaseField):
def __init__(self, check_exists=False, is_directory=None, **kwargs):
super().__init__(**kwargs)
self.check_exists = check_exists
self.is_directory = is_directory
def validate(self, entry, field_uri=None):
super().validate(entry, field_uri)
if entry is None:
return
field_uri = field_uri or self.field_uri
try:
path = Path(entry)
except TypeError:
self.raise_error(entry, field_uri, "values is expected to be path-like")
if self.check_exists and not path.exists():
self.raise_error(entry, field_uri, "path does not exist")
else:
if self.is_directory and not path.is_dir():
self.raise_error(entry, field_uri, "is not a directory")
if self.is_directory is False and not path.is_file():
self.raise_error(entry, field_uri, "is a directory, regular file expected")
class BoolField(BaseField):
def validate(self, entry, field_uri=None):
super().validate(entry, field_uri)
if entry is None:
return
field_uri = field_uri or self.field_uri
if not isinstance(entry, bool):
raise ConfigError("{} is expected to be bool".format(field_uri))
def _get_field_type(key_type):
if not isinstance(key_type, BaseField):
type_ = _TYPE_TO_FIELD_CLASS.get(key_type)
if callable(type_):
return type_()
return key_type
_TYPE_TO_FIELD_CLASS = {
int: partial(NumberField, floats=False),
float: partial(NumberField, floats=True),
dict: partial(DictField, validate_keys=False, validate_values=False),
list: partial(ListField, validate_values=False),
Path: PathField,
str: StringField,
bool: BoolField,
}
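# Illustrative sketch (not part of the original module): validators are built
# by declaring fields as class attributes; the class and field names below are
# invented for the example.
class _ExampleModelConfig(ConfigValidator):
    name = StringField()
    weights = PathField(optional=True)
    batch = NumberField(floats=False, min_value=1, optional=True)


def _example_validate_model_config():
    # Unknown keys follow on_extra_argument; missing required fields and
    # out-of-range values raise ConfigError.
    _ExampleModelConfig('model').validate({'name': 'resnet', 'batch': 32})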
|
the-stack_0_13221 | # coding: utf-8
import pprint
import re
import six
class ListScalingTagInfosByResourceIdRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'resource_type': 'str',
'resource_id': 'str'
}
attribute_map = {
'resource_type': 'resource_type',
'resource_id': 'resource_id'
}
def __init__(self, resource_type=None, resource_id=None):
"""ListScalingTagInfosByResourceIdRequest - a model defined in huaweicloud sdk"""
self._resource_type = None
self._resource_id = None
self.discriminator = None
self.resource_type = resource_type
self.resource_id = resource_id
@property
def resource_type(self):
"""Gets the resource_type of this ListScalingTagInfosByResourceIdRequest.
:return: The resource_type of this ListScalingTagInfosByResourceIdRequest.
:rtype: str
"""
return self._resource_type
@resource_type.setter
def resource_type(self, resource_type):
"""Sets the resource_type of this ListScalingTagInfosByResourceIdRequest.
:param resource_type: The resource_type of this ListScalingTagInfosByResourceIdRequest.
:type: str
"""
self._resource_type = resource_type
@property
def resource_id(self):
"""Gets the resource_id of this ListScalingTagInfosByResourceIdRequest.
:return: The resource_id of this ListScalingTagInfosByResourceIdRequest.
:rtype: str
"""
return self._resource_id
@resource_id.setter
def resource_id(self, resource_id):
"""Sets the resource_id of this ListScalingTagInfosByResourceIdRequest.
:param resource_id: The resource_id of this ListScalingTagInfosByResourceIdRequest.
:type: str
"""
self._resource_id = resource_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListScalingTagInfosByResourceIdRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
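# Usage sketch (hypothetical resource values; real ones come from the Auto Scaling
# service). The model is a plain value object, so it can be built and serialised
# without any network access.
if __name__ == "__main__":
    request = ListScalingTagInfosByResourceIdRequest(
        resource_type="scaling_group_tag",
        resource_id="example-scaling-group-id")
    print(request.to_dict())
    print(request == ListScalingTagInfosByResourceIdRequest("scaling_group_tag",
                                                            "example-scaling-group-id"))  # True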
|
the-stack_0_13222 | from BTcrypto import Crypto
from BT1.Encrypter import protocol_name
default_task_id = []
class SingleRawServer:
def __init__(self, info_hash, multihandler, doneflag, protocol):
self.info_hash = info_hash
self.doneflag = doneflag
self.protocol = protocol
self.multihandler = multihandler
self.rawserver = multihandler.rawserver
self.finished = False
self.running = False
self.handler = None
self.taskqueue = []
def shutdown(self):
if not self.finished:
self.multihandler.shutdown_torrent(self.info_hash)
def _shutdown(self):
if not self.finished:
self.finished = True
self.running = False
self.rawserver.kill_tasks(self.info_hash)
if self.handler:
self.handler.close_all()
def _external_connection_made(self, c, options, already_read,
encrypted=None):
if self.running:
c.set_handler(self.handler)
self.handler.externally_handshaked_connection_made(
c, options, already_read, encrypted=encrypted)
### RawServer functions ###
def add_task(self, func, delay=0, id=default_task_id):
if id is default_task_id:
id = self.info_hash
if not self.finished:
self.rawserver.add_task(func, delay, id)
# def bind(self, port, bind = '', reuse = False):
# pass # not handled here
def start_connection(self, dns, handler=None):
if not handler:
handler = self.handler
c = self.rawserver.start_connection(dns, handler)
return c
# def listen_forever(self, handler):
# pass # don't call with this
def start_listening(self, handler):
self.handler = handler
self.running = True
return self.shutdown # obviously, doesn't listen forever
def is_finished(self):
return self.finished
def get_exception_flag(self):
return self.rawserver.get_exception_flag()
class NewSocketHandler: # hand a new socket off where it belongs
def __init__(self, multihandler, connection):
self.multihandler = multihandler
self.connection = connection
connection.set_handler(self)
self.closed = False
self.buffer = ''
self.complete = False
self.read = self._read
self.write = connection.write
self.next_len = 1 + len(protocol_name)
self.next_func = self.read_header
self.multihandler.rawserver.add_task(self._auto_close, 30)
def _auto_close(self):
if not self.complete:
self.close()
def close(self):
if not self.closed:
self.connection.close()
self.closed = True
# copied from Encrypter and modified
def _read_header(self, s):
if s == chr(len(protocol_name)) + protocol_name:
self.protocol = protocol_name
return 8, self.read_options
return None
def read_header(self, s):
if self._read_header(s):
if self.multihandler.config['crypto_only']:
return None
return 8, self.read_options
if not self.multihandler.config['crypto_allowed']:
return None
self.encrypted = True
self.encrypter = Crypto(False)
self._write_buffer(s)
return self.encrypter.keylength, self.read_crypto_header
def read_crypto_header(self, s):
self.encrypter.received_key(s)
self.write(self.encrypter.pubkey + self.encrypter.padding())
self._max_search = 520
return 0, self.read_crypto_block3a
def _search_for_pattern(self, s, pat):
p = s.find(pat)
if p < 0:
self._max_search -= len(s) + 1 - len(pat)
if self._max_search < 0:
self.close()
return False
self._write_buffer(s[1 - len(pat):])
return False
self._write_buffer(s[p + len(pat):])
return True
def read_crypto_block3a(self, s):
if not self._search_for_pattern(s, self.encrypter.block3a):
return -1, self.read_crypto_block3a # wait for more data
return 20, self.read_crypto_block3b
def read_crypto_block3b(self, s):
srs = self.multihandler.singlerawservers
for k in srs:
if self.encrypter.test_skey(s, k):
srs[k]._external_connection_made(
self.connection, None, self.buffer,
encrypted=self.encrypter)
return True
return None
def read_options(self, s):
self.options = s
return 20, self.read_download_id
def read_download_id(self, s):
srs = self.multihandler.singlerawservers
if s in srs:
if srs[s].protocol == self.protocol:
srs[s]._external_connection_made(
self.connection, self.options, self.buffer)
return True
return None
def read_dead(self, s):
return None
def data_came_in(self, garbage, s):
self.read(s)
def _write_buffer(self, s):
self.buffer = s + self.buffer
def _read(self, s):
self.buffer += s
while True:
if self.closed:
return
# self.next_len = # of characters function expects
# or 0 = all characters in the buffer
# or -1 = wait for next read, then all characters in the buffer
if self.next_len <= 0:
m = self.buffer
self.buffer = ''
elif len(self.buffer) >= self.next_len:
m = self.buffer[:self.next_len]
self.buffer = self.buffer[self.next_len:]
else:
return
try:
x = self.next_func(m)
except:
self.next_len, self.next_func = 1, self.read_dead
raise
if x is None:
self.close()
return
if x:
self.complete = True
return
self.next_len, self.next_func = x
if self.next_len < 0: # already checked buffer
return # wait for additional data
def connection_flushed(self, ss):
pass
def connection_lost(self, ss):
self.closed = True
class MultiHandler:
def __init__(self, rawserver, doneflag, config):
self.rawserver = rawserver
self.masterdoneflag = doneflag
self.config = config
self.singlerawservers = {}
self.connections = {}
self.taskqueues = {}
def newRawServer(self, info_hash, doneflag, protocol=protocol_name):
new = SingleRawServer(info_hash, self, doneflag, protocol)
self.singlerawservers[info_hash] = new
return new
def shutdown_torrent(self, info_hash):
self.singlerawservers[info_hash]._shutdown()
del self.singlerawservers[info_hash]
def listen_forever(self):
self.rawserver.listen_forever(self)
for srs in self.singlerawservers.itervalues():
srs.finished = True
srs.running = False
srs.doneflag.set()
### RawServer handler functions ###
# be wary of name collisions
def external_connection_made(self, ss):
NewSocketHandler(self, ss)
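# Rough wiring sketch (assumption: `rawserver`, `doneflag`, `config` and `handler`
# come from the surrounding BitTornado launch code and follow its interfaces):
#
#   multihandler = MultiHandler(rawserver, doneflag, config)
#   single = multihandler.newRawServer(info_hash, torrent_doneflag)
#   shutdown = single.start_listening(handler)   # register the per-torrent handler
#   multihandler.listen_forever()                # dispatches new sockets via NewSocketHandler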
|
the-stack_0_13224 | # coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
import datetime
import threading
import time
import pandas as pd
from QUANTAXIS.QAUtil.QALogs import QA_util_log_info
def QA_util_time_now():
"""
    Return the current time.
    :return: datetime.datetime
"""
return datetime.datetime.now()
def QA_util_date_today():
"""
    Return the current date.
    :return: datetime.date
"""
return datetime.date.today()
def QA_util_today_str():
"""
    Return today's date as a string.
    :return: str, e.g. '2011-11-11'
"""
dt = QA_util_date_today()
return QA_util_datetime_to_strdate(dt)
def QA_util_date_str2int(date):
"""
    Convert a date string such as '2011-09-11' into the integer 20110911,
    e.g. '2018-12-01' becomes 20181201.
    :param date: date string (or an already converted int, returned unchanged)
    :return: int
"""
# return int(str(date)[0:4] + str(date)[5:7] + str(date)[8:10])
if isinstance(date, str):
return int(str().join(date.split('-')))
elif isinstance(date, int):
return date
def QA_util_date_int2str(int_date):
"""
    Convert an 8-digit integer date such as 20110911 into the string '2011-09-11'.
    :param int_date: 8-digit int (a 10-character date string is returned unchanged)
    :return: str
"""
date = str(int_date)
if len(date) == 8:
return str(date[0:4] + '-' + date[4:6] + '-' + date[6:8])
elif len(date) == 10:
return date
def QA_util_to_datetime(time):
"""
    Convert a string such as '2018-01-01' into a datetime.datetime object.
    :param time: str -- must be formatted like '2018-01-01' (length 10) or '2018-01-01 00:00:00' (length 19)
    :return: datetime.datetime
"""
if len(str(time)) == 10:
_time = '{} 00:00:00'.format(time)
elif len(str(time)) == 19:
_time = str(time)
else:
QA_util_log_info('WRONG DATETIME FORMAT {}'.format(time))
return datetime.datetime.strptime(_time, '%Y-%m-%d %H:%M:%S')
def QA_util_datetime_to_strdate(dt):
"""
    :param dt: python datetime.datetime
    :return: date string like '1999-02-01'
"""
strdate = "%04d-%02d-%02d" % (dt.year, dt.month, dt.day)
return strdate
def QA_util_datetime_to_strdatetime(dt):
"""
    :param dt: python datetime.datetime
    :return: datetime string like '1999-02-01 09:30:51'
"""
strdatetime = "%04d-%02d-%02d %02d:%02d:%02d" % (
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second
)
return strdatetime
def QA_util_date_stamp(date):
"""
    Convert a date string such as '2018-01-01' into a float timestamp, similar to the value returned by time.time().
    :param date: str -- must be formatted like '2018-01-01' (length 10)
    :return: float
"""
datestr = str(date)[0:10]
date = time.mktime(time.strptime(datestr, '%Y-%m-%d'))
return date
def QA_util_time_stamp(time_):
"""
    Convert a datetime string such as '2018-01-01 00:00:00' into a float timestamp, similar to the value returned by time.time().
    :param time_: str -- preferably formatted as %Y-%m-%d %H:%M:%S with a space between date and time
    :return: float
"""
if len(str(time_)) == 10:
        # yyyy-mm-dd format
return time.mktime(time.strptime(time_, '%Y-%m-%d'))
elif len(str(time_)) == 16:
        # yyyy-mm-dd hh:mm format
return time.mktime(time.strptime(time_, '%Y-%m-%d %H:%M'))
else:
timestr = str(time_)[0:19]
return time.mktime(time.strptime(timestr, '%Y-%m-%d %H:%M:%S'))
def QA_util_pands_timestamp_to_date(pandsTimestamp):
"""
    Convert a pandas Timestamp into a datetime.date.
    :param pandsTimestamp: pandas._libs.tslib.Timestamp
    :return: datetime.date
"""
return pandsTimestamp.to_pydatetime().date()
def QA_util_pands_timestamp_to_datetime(pandsTimestamp):
"""
    Convert a pandas Timestamp into a datetime.datetime.
    :param pandsTimestamp: pandas._libs.tslib.Timestamp
    :return: datetime.datetime
"""
return pandsTimestamp.to_pydatetime()
def QA_util_stamp2datetime(timestamp):
"""
    Convert a timestamp into a datetime.datetime.
    Timestamps produced by pandas are 13-digit integers (milliseconds), so they must be divided by 1000.
    It's common for this to be restricted to years from 1970 through 2038.
    Counts since 1970 in seconds, milliseconds, microseconds or nanoseconds are all handled.
    :param timestamp: int/float timestamp
    :return: datetime.datetime
"""
try:
return datetime.datetime.fromtimestamp(timestamp)
except Exception as e:
# it won't work ??
try:
return datetime.datetime.fromtimestamp(timestamp / 1000)
except:
try:
return datetime.datetime.fromtimestamp(timestamp / 1000000)
except:
return datetime.datetime.fromtimestamp(timestamp / 1000000000)
#
def QA_util_ms_stamp(ms):
"""
    Return the value unchanged (no processing).
    :param ms: int -- tick count in milliseconds
    :return: ms
"""
return ms
def QA_util_date_valid(date):
"""
    Check whether a string is a valid date in the '1982-05-11' format.
    :param date: str -- expected length 10
    :return: bool -- whether the format is correct
"""
try:
time.strptime(date, "%Y-%m-%d")
return True
except:
return False
def QA_util_realtime(strtime, client):
"""
    Look up the real trade date in the database.
    :param strtime: str -- formatted like '1999-12-11'
    :param client: pymongo.MongoClient -- mongodb connection obtained from QA_util_sql_mongo_setting
    :return: dict -- {'time_real': trade date, 'id': index}
"""
time_stamp = QA_util_date_stamp(strtime)
coll = client.quantaxis.trade_date
temp_str = coll.find_one({'date_stamp': {"$gte": time_stamp}})
time_real = temp_str['date']
time_id = temp_str['num']
return {'time_real': time_real, 'id': time_id}
def QA_util_id2date(idx, client):
"""
    Query the trade date stored for a given index in the database.
    :param idx: database index
    :param client: pymongo.MongoClient -- mongodb connection obtained from QA_util_sql_mongo_setting
    :return: str -- trade date stored in the database
"""
coll = client.quantaxis.trade_date
temp_str = coll.find_one({'num': idx})
return temp_str['date']
def QA_util_is_trade(date, code, client):
"""
    Check whether the given date is a trading day for the given stock,
    by querying the database.
    :param date: str -- 10-character string like '1999-12-11'
    :param code: str -- 6-character stock code, e.g. '603658'
    :param client: pymongo.MongoClient -- mongodb connection obtained from QA_util_sql_mongo_setting
    :return: bool -- whether it is a trading day
"""
coll = client.quantaxis.stock_day
date = str(date)[0:10]
is_trade = coll.find_one({'code': code, 'date': date})
try:
len(is_trade)
return True
except:
return False
def QA_util_get_date_index(date, trade_list):
"""
    Return the index position of the given date within trade_list.
    :param date: str -- 10-character string like '1999-12-11'
    :param trade_list: list of trade-date strings
    :return: int -- index of the date in trade_list
"""
return trade_list.index(date)
def QA_util_get_index_date(id, trade_list):
"""
    :param id: int -- index into trade_list
    :param trade_list: list of trade-date strings
    :return: the trade date at position id
"""
return trade_list[id]
def QA_util_select_hours(time=None, gt=None, lt=None, gte=None, lte=None):
    'QUANTAXIS hour-window helper: return True when the hour of time (default: now) satisfies all given bounds, e.g. between 9 a.m. and 11 a.m.'
if time is None:
__realtime = datetime.datetime.now()
else:
__realtime = time
fun_list = []
if gt != None:
fun_list.append('>')
if lt != None:
fun_list.append('<')
if gte != None:
fun_list.append('>=')
if lte != None:
fun_list.append('<=')
assert len(fun_list) > 0
true_list = []
try:
for item in fun_list:
if item == '>':
if __realtime.strftime('%H') > gt:
true_list.append(0)
else:
true_list.append(1)
elif item == '<':
if __realtime.strftime('%H') < lt:
true_list.append(0)
else:
true_list.append(1)
elif item == '>=':
if __realtime.strftime('%H') >= gte:
true_list.append(0)
else:
true_list.append(1)
elif item == '<=':
if __realtime.strftime('%H') <= lte:
true_list.append(0)
else:
true_list.append(1)
except:
return Exception
if sum(true_list) > 0:
return False
else:
return True
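# Example (note: the comparison is done on the zero-padded '%H' string, so bounds
# should be passed as two-character strings):
#   QA_util_select_hours(gte='09', lte='11')   # True only between 09:00 and 11:59 local time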
def QA_util_select_min(time=None, gt=None, lt=None, gte=None, lte=None):
"""
    QUANTAXIS minute-window helper: return True when the minute of time (default: now) satisfies all given bounds, e.g. between minute 30 and 59.
:param time:
:param gt:
:param lt:
:param gte:
:param lte:
:return:
"""
if time is None:
__realtime = datetime.datetime.now()
else:
__realtime = time
fun_list = []
if gt != None:
fun_list.append('>')
if lt != None:
fun_list.append('<')
if gte != None:
fun_list.append('>=')
if lte != None:
fun_list.append('<=')
assert len(fun_list) > 0
true_list = []
try:
for item in fun_list:
if item == '>':
if __realtime.strftime('%M') > gt:
true_list.append(0)
else:
true_list.append(1)
elif item == '<':
if __realtime.strftime('%M') < lt:
true_list.append(0)
else:
true_list.append(1)
elif item == '>=':
if __realtime.strftime('%M') >= gte:
true_list.append(0)
else:
true_list.append(1)
elif item == '<=':
if __realtime.strftime('%M') <= lte:
true_list.append(0)
else:
true_list.append(1)
except:
return Exception
if sum(true_list) > 0:
return False
else:
return True
def QA_util_time_delay(time_=0):
"""
    A delay helper intended for reuse, e.g. as a @decorator.
    It uses a threading timer for the delay so the process is not blocked:
    if two functions are submitted at the same time and only the first one
    needs a delay, using sleep would block the second one as well.
:param time_:
:return:
"""
def _exec(func):
threading.Timer(time_, func)
return _exec
def QA_util_calc_time(func, *args, **kwargs):
"""
    Measure and print how long func(*args, **kwargs) takes to run.
:param func:
:param args:
:param kwargs:
:return:
"""
_time = datetime.datetime.now()
func(*args, **kwargs)
print(datetime.datetime.now() - _time)
# return datetime.datetime.now() - _time
month_data = pd.date_range(
'1/1/1996',
'12/31/2023',
freq='Q-MAR'
).astype(str).tolist()
if __name__ == '__main__':
print(QA_util_time_stamp('2017-01-01 10:25:08'))
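    # A few more illustrative calls (hand-written expected values; the timestamp
    # results depend on the local timezone, so they are only indicative):
    print(QA_util_date_str2int('2011-09-11'))      # 20110911
    print(QA_util_date_int2str(20181201))          # '2018-12-01'
    print(QA_util_date_valid('1982-05-11'))        # True
    print(QA_util_stamp2datetime(1546300800000))   # 13-digit ms timestamp -> ~2019-01-01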
|
the-stack_0_13226 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The QQcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- createrawtransaction
- signrawtransaction
- sendrawtransaction
- decoderawtransaction
- getrawtransaction
"""
from test_framework.test_framework import QQcoinTestFramework
from test_framework.util import *
class multidict(dict):
"""Dictionary that allows duplicate keys.
Constructed with a list of (key, value) tuples. When dumped by the json module,
will output invalid json with repeated keys, eg:
    >>> json.dumps(multidict([(1,2),(1,2)]))
'{"1": 2, "1": 2}'
Used to test calls to rpc methods with repeated keys in the json object."""
def __init__(self, x):
dict.__init__(self, x)
self.x = x
def items(self):
return self.x
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(QQcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"], ["-addresstype=legacy"]]
def setup_network(self, split=False):
super().setup_network()
connect_nodes_bi(self.nodes,0,2)
def run_test(self):
#prepare some coins for multiple *rawtransaction commands
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
# Test `createrawtransaction` required parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
# Test `createrawtransaction` invalid extra parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
# Test `createrawtransaction` invalid `inputs`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{}], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
# Test `createrawtransaction` invalid `outputs`
address = self.nodes[0].getnewaddress()
assert_raises_rpc_error(-3, "Expected type object", self.nodes[0].createrawtransaction, [], 'foo')
assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
assert_raises_rpc_error(-5, "Invalid QQcoin address", self.nodes[0].createrawtransaction, [], {'foo': 0})
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
# Test `createrawtransaction` invalid `locktime`
assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
# Test `createrawtransaction` invalid `replaceable`
assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
#########################################
# sendrawtransaction with missing input #
#########################################
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}]  # won't exist
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransaction(rawtx)
# This will raise an exception since there are missing inputs
assert_raises_rpc_error(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])
#####################################
# getrawtransaction with block hash #
#####################################
# make a tx by sending then generate 2 blocks; block1 has the tx in it
tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
block1, block2 = self.nodes[2].generate(2)
self.sync_all()
# We should be able to get the raw transaction by providing the correct block
gottx = self.nodes[0].getrawtransaction(tx, True, block1)
assert_equal(gottx['txid'], tx)
assert_equal(gottx['in_active_chain'], True)
# We should not have the 'in_active_chain' flag when we don't provide a block
gottx = self.nodes[0].getrawtransaction(tx, True)
assert_equal(gottx['txid'], tx)
assert 'in_active_chain' not in gottx
# We should not get the tx if we provide an unrelated block
assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
# An invalid block hash should raise the correct errors
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, True)
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, "foobar")
assert_raises_rpc_error(-8, "parameter 3 must be of length 64", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
# Undo the blocks and check in_active_chain
self.nodes[0].invalidateblock(block1)
gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
assert_equal(gottx['in_active_chain'], False)
self.nodes[0].reconsiderblock(block1)
assert_equal(self.nodes[0].getbestblockhash(), block2)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 BTC to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
addr3Obj = self.nodes[2].validateaddress(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
        # THIS IS AN INCOMPLETE FEATURE
#NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransaction(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# 2of2 test for combining transactions
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
mSigObjValid = self.nodes[2].validateaddress(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx2['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned1 = self.nodes[1].signrawtransaction(rawTx2, inputs)
self.log.info(rawTxPartialSigned1)
        assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxPartialSigned2 = self.nodes[2].signrawtransaction(rawTx2, inputs)
self.log.info(rawTxPartialSigned2)
assert_equal(rawTxPartialSigned2['complete'], False) #node2 only has one key, can't comp. sign the tx
rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
self.log.info(rawTxComb)
self.nodes[2].sendrawtransaction(rawTxComb)
rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# decoderawtransaction tests
# witness transaction
encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
# non-witness transaction
encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
# getrawtransaction tests
# 1. valid parameters - only supply txid
txHash = rawTx["hash"]
assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
# 5. valid parameters - supply txid and True for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "Flase")
# 7. invalid parameters - supply txid and empty array
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
# 9. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
# 10. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
if __name__ == '__main__':
RawTransactionsTest().main()
|
the-stack_0_13229 | # pylint: disable=no-self-use,invalid-name
import numpy
from flaky import flaky
from deep_qa.models.reading_comprehension import BidirectionalAttentionFlow
from deep_qa.common.params import Params
from ...common.test_case import DeepQaTestCase
class TestBidirectionalAttentionFlow(DeepQaTestCase):
@flaky
def test_trains_and_loads_correctly(self):
self.write_span_prediction_files()
args = Params({
'embedding_dim': {'words': 4, 'characters': 4},
'save_models': True,
'tokenizer': {'type': 'words and characters'},
'show_summary_with_masking_info': True,
})
self.ensure_model_trains_and_loads(BidirectionalAttentionFlow, args)
def test_get_best_span(self):
# Note that the best span cannot be (1, 0) since even though 0.3 * 0.5 is the greatest
# value, the end span index is constrained to occur after the begin span index.
span_begin_probs = numpy.array([0.1, 0.3, 0.05, 0.3, 0.25])
span_end_probs = numpy.array([0.5, 0.1, 0.2, 0.05, 0.15])
begin_end_idxs = BidirectionalAttentionFlow.get_best_span(span_begin_probs,
span_end_probs)
assert begin_end_idxs == (1, 2)
# Testing an edge case of the dynamic program here, for the order of when you update the
# best previous span position. We should not get (1, 1), because that's an empty span.
span_begin_probs = numpy.array([0.4, 0.5, 0.1])
span_end_probs = numpy.array([0.3, 0.6, 0.1])
begin_end_idxs = BidirectionalAttentionFlow.get_best_span(span_begin_probs,
span_end_probs)
assert begin_end_idxs == (0, 1)
# test higher-order input
# Note that the best span cannot be (1, 1) since even though 0.3 * 0.5 is the greatest
# value, the end span index is constrained to occur after the begin span index.
span_begin_probs = numpy.array([[0.1, 0.3, 0.05, 0.3, 0.25]])
span_end_probs = numpy.array([[0.1, 0.5, 0.2, 0.05, 0.15]])
begin_end_idxs = BidirectionalAttentionFlow.get_best_span(span_begin_probs,
span_end_probs)
assert begin_end_idxs == (1, 2)
|
the-stack_0_13231 | # dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 1024)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 1024),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
    samples_per_gpu=3,  # 4 would exceed GPU memory
workers_per_gpu=2,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/train',
ann_dir='gtFine/train',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/val',
ann_dir='gtFine/val',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/test',
# img_dir='leftImg8bit/val',
ann_dir='gtFine/test',
# ann_dir='gtFine/val',
pipeline=test_pipeline))
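# Typically consumed as a base config fragment by a model config, e.g.
# (hypothetical path): _base_ = ['../_base_/datasets/cityscapes.py', ...]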
|
the-stack_0_13232 | import random
import altair
import matplotlib.pyplot as plt
import pandas as pd
from bokeh.plotting import figure as bokeh_figure
from vizno.report import Report
xs = [random.random() for _ in range(100)]
ys = [x + random.random() * 0.1 for x in xs]
f = plt.figure()
ax = f.add_subplot(111)
ax.plot(xs, ys, ".")
ax.set_xlabel("Label")
chart = (
altair.Chart(
pd.DataFrame(
{
"a": xs,
"b": ys,
}
)
)
.mark_circle(size=20)
.encode(x="a", y="b")
)
plot = bokeh_figure(plot_width=400, plot_height=300)
plot.circle(xs, ys)
r = Report.magic(title="Magic report", description="A magically gathered report")
|
the-stack_0_13234 | import datetime
from dateutil.parser import parse
from mongoengine import DateTimeField, FileField
from mongoengine.connection import DEFAULT_CONNECTION_NAME
from mongoengine.python_support import str_types
import StringIO
from django.conf import settings
if settings.FILE_DB == settings.S3:
import crits.core.s3_tools as S3
class CritsDateTimeField(DateTimeField):
"""
Custom MongoEngine DateTimeField. Utilizes a transform such that if the
value passed in is a string we will convert it to a datetime.datetime
object, or if it is set to None we will use the current datetime (useful
when instantiating new objects and wanting the default dates to all be the
current datetime).
"""
def __set__(self, instance, value):
value = self.transform(value)
return super(CritsDateTimeField, self).__set__(instance, value)
def transform(self, value):
if value and isinstance(value, basestring):
return parse(value, fuzzy=True)
elif not value:
return datetime.datetime.now()
else:
return value
class S3Proxy(object):
"""
Custom proxy for MongoEngine which uses S3 to store binaries instead of
GridFS.
"""
def __init__(self, grid_id=None, key=None, instance=None,
db_alias=DEFAULT_CONNECTION_NAME, collection_name='fs'):
self.grid_id = grid_id # Store id for file
self.key = key
self.instance = instance
self.db_alias = db_alias
self.collection_name = collection_name
self.newfile = None # Used for partial writes
self.gridout = None
def __getattr__(self, name):
attrs = ('_fs', 'grid_id', 'key', 'instance', 'db_alias',
'collection_name', 'newfile', 'gridout')
if name in attrs:
return self.__getattribute__(name)
obj = self.get()
if name in dir(obj):
return getattr(obj, name)
raise AttributeError
def __get__(self, instance, value):
return self
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.grid_id)
def delete(self):
# Delete file from S3, FileField still remains
S3.delete_file_s3(self.grid_id,self.collection_name)
self.grid_id = None
self.gridout = None
self._mark_as_changed()
def get(self, id=None):
if id:
self.grid_id = id
if self.grid_id is None:
return None
try:
if self.gridout is None:
self.gridout = StringIO.StringIO(S3.get_file_s3(self.grid_id, self.collection_name))
return self.gridout
except:
return None
def put(self, file_obj, **kwargs):
if self.grid_id:
raise Exception('This document already has a file. Either delete '
'it or call replace to overwrite it')
self.grid_id = S3.put_file_s3(file_obj, self.collection_name)
self._mark_as_changed()
def read(self, size=-1):
gridout = self.get()
if gridout is None:
return None
else:
try:
return gridout.read(size)
except:
return ""
def _mark_as_changed(self):
"""Inform the instance that `self.key` has been changed"""
if self.instance:
self.instance._mark_as_changed(self.key)
class S3FileField(FileField):
"""
Custom FileField for MongoEngine which utilizes S3.
"""
def __init__(self, db_alias=DEFAULT_CONNECTION_NAME, collection_name="fs",
**kwargs):
super(S3FileField, self).__init__(db_alias, collection_name, **kwargs)
self.proxy_class = S3Proxy
def __set__(self, instance, value):
key = self.name
if ((hasattr(value, 'read') and not
isinstance(value, self.proxy_class)) or isinstance(value, str_types)):
# using "FileField() = file/string" notation
grid_file = instance._data.get(self.name)
# If a file already exists, delete it
if grid_file:
try:
grid_file.delete()
except:
pass
# Create a new file with the new data
grid_file.put(value)
else:
# Create a new proxy object as we don't already have one
instance._data[key] = self.proxy_class(key=key, instance=instance,
collection_name=self.collection_name)
instance._data[key].put(value)
else:
instance._data[key] = value
instance._mark_as_changed(key)
def getFileField(db_alias=DEFAULT_CONNECTION_NAME, collection_name="fs", **kwargs):
"""
Determine if the admin has configured CRITs to utilize GridFS or S3 for
binary storage.
"""
if settings.FILE_DB == settings.GRIDFS:
return FileField(db_alias, collection_name, **kwargs)
elif settings.FILE_DB == settings.S3:
return S3FileField(db_alias, collection_name, **kwargs)
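# Usage sketch (hypothetical document class; assumes a configured MongoEngine
# connection and CRITs settings). getFileField picks GridFS or S3 transparently,
# so a document declares the field once and stays storage-agnostic:
#
#   from mongoengine import Document
#
#   class SampleBinary(Document):
#       filedata = getFileField(collection_name='objects')
#       created = CritsDateTimeField()
#
#   doc = SampleBinary()
#   doc.filedata.put(open('/tmp/example.bin', 'rb'))  # stored via GridFS or S3
#   doc.created = '2015-01-01 12:00:00'               # parsed to a datetime by CritsDateTimeField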
|
the-stack_0_13236 | """Contains domain batch classes.
"""
__author__ = 'Paul Landes'
from typing import Tuple, Type, Any
from dataclasses import dataclass, field
import copy as cp
from zensols.config import Settings
from zensols.persist import persisted
from zensols.deeplearn.batch import (
DataPoint,
Batch,
BatchStash,
ManagerFeatureMapping,
FieldFeatureMapping,
BatchFeatureMapping,
)
from zensols.nlp import (
FeatureSentence, FeatureDocument, TokenAnnotatedFeatureSentence
)
from zensols.deeplearn.result import ResultsContainer
from zensols.deeplearn.vectorize import (
FeatureVectorizerManager, FeatureVectorizer
)
from zensols.deepnlp.batch import FeatureSentenceDataPoint
from zensols.deepnlp.classify import ClassificationPredictionMapper
@dataclass
class NERPredictionMapper(ClassificationPredictionMapper):
def _create_data_point(self, cls: Type[DataPoint],
feature: Any) -> DataPoint:
return cls(None, self.batch_stash, feature, True)
def _create_features(self, sent_text: str) -> Tuple[FeatureSentence]:
doc: FeatureDocument = self.vec_manager.parse(sent_text)
self._docs.append(doc)
return doc.sents
def map_results(self, result: ResultsContainer) -> Settings:
classes = self._map_classes(result)
return Settings(classes=tuple(classes), docs=tuple(self._docs))
@dataclass
class NERDataPoint(FeatureSentenceDataPoint):
is_pred: bool = field(default=False)
def __post_init__(self):
self.sent = TokenAnnotatedFeatureSentence(
self.sent.sent_tokens, self.sent.text, self.ents)
if self.is_pred:
self._map_syn(self.sent)
self._map_tag(self.sent)
def _map_syn(self, sent: FeatureSentence):
"""Map from spaCy POS tags to the corpus *syntactic chunk*."""
last = None
outs = set('CC .'.split())
for t in sent:
syn = 'NP'
tag = t.tag_
if tag.startswith('V') or tag == 'TO':
syn = 'VP'
elif tag == 'IN':
syn = 'PP'
elif tag in outs:
syn = 'O'
elif tag == 'ROOT':
last = None
if syn == 'O':
stag = syn
else:
stag = 'I' if last == syn else 'B'
stag = f'{stag}-{syn}'
last = syn
t.syn_ = stag
def _map_tag(self, sent: FeatureSentence):
stash: BatchStash = self.batch_stash
mng: FeatureVectorizerManager = \
stash.vectorizer_manager_set['language_feature_manager']
vec: FeatureVectorizer = mng['tag']
labs = set(vec.label_encoder.classes_)
for t in sent:
if t.tag_ not in labs:
t.tag_ = ','
@property
@persisted('_ents', transient=True)
def ents(self) -> Tuple[str]:
"""The label: the fourth the named entity tag."""
if self.is_pred:
return tuple([None] * len(self.sent))
else:
return tuple(map(lambda t: t.ent_, self.sent.token_iter()))
@property
def trans_doc(self) -> FeatureDocument:
"""The document used by the transformer vectorizers. Return ``None`` for
prediction data points to avoid vectorization.
"""
if self.is_pred:
return None
return self.doc
@dataclass
class NERBatch(Batch):
LANGUAGE_FEATURE_MANAGER_NAME = 'language_feature_manager'
GLOVE_50_EMBEDDING = 'glove_50_embedding'
GLOVE_300_EMBEDDING = 'glove_300_embedding'
WORD2VEC_300_EMBEDDING = 'word2vec_300_embedding'
TRANSFORMER_FIXED_EMBEDDING = 'transformer_fixed_embedding'
TRANSFORMER_TRAINABLE_EMBEDDING = 'transformer_trainable_embedding'
TRANSFORMER_TRAINABLE_MODEL_NAME = 'transformer_trainable'
EMBEDDING_ATTRIBUTES = {GLOVE_50_EMBEDDING, GLOVE_300_EMBEDDING,
WORD2VEC_300_EMBEDDING, TRANSFORMER_FIXED_EMBEDDING,
TRANSFORMER_TRAINABLE_EMBEDDING}
MAPPINGS = BatchFeatureMapping(
'ents',
[ManagerFeatureMapping(
'label_vectorizer_manager',
(FieldFeatureMapping('ents', 'entlabel', True, is_label=True),
FieldFeatureMapping('mask', 'mask', True, 'ents'),
)),
ManagerFeatureMapping(
LANGUAGE_FEATURE_MANAGER_NAME,
(FieldFeatureMapping('tags', 'tag', True, 'doc'),
FieldFeatureMapping('syns', 'syn', True, 'doc'),
FieldFeatureMapping(GLOVE_50_EMBEDDING, 'wvglove50', True, 'doc'),
FieldFeatureMapping(GLOVE_300_EMBEDDING, 'wvglove300', True, 'doc'),
FieldFeatureMapping(WORD2VEC_300_EMBEDDING, 'w2v300', True, 'doc'),
FieldFeatureMapping(TRANSFORMER_TRAINABLE_EMBEDDING, TRANSFORMER_TRAINABLE_MODEL_NAME, True, 'doc'),
FieldFeatureMapping('tags_expander', 'transformer_tags_expander', True, 'doc'),
FieldFeatureMapping('syns_expander', 'transformer_syns_expander', True, 'doc'),
FieldFeatureMapping('ents_trans', 'entlabel_trans', True, 'trans_doc', is_label=True),
),)])
TRANS_MAPPINGS = cp.deepcopy(MAPPINGS)
TRANS_MAPPINGS.label_attribute_name = 'ents_trans'
def _get_batch_feature_mappings(self) -> BatchFeatureMapping:
stash: BatchStash = self.batch_stash
if 'ents_trans' in stash.decoded_attributes:
maps = self.TRANS_MAPPINGS
else:
maps = self.MAPPINGS
return maps
|
the-stack_0_13238 | """deCONZ sensor platform tests."""
from copy import deepcopy
from homeassistant.components.deconz.const import CONF_ALLOW_CLIP_SENSOR
from homeassistant.components.deconz.gateway import get_gateway_from_config_entry
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_POWER,
STATE_UNAVAILABLE,
)
from .test_gateway import DECONZ_WEB_REQUEST, setup_deconz_integration
SENSORS = {
"1": {
"id": "Light sensor id",
"name": "Light level sensor",
"type": "ZHALightLevel",
"state": {"lightlevel": 30000, "dark": False},
"config": {"on": True, "reachable": True, "temperature": 10},
"uniqueid": "00:00:00:00:00:00:00:00-00",
},
"2": {
"id": "Presence sensor id",
"name": "Presence sensor",
"type": "ZHAPresence",
"state": {"presence": False},
"config": {},
"uniqueid": "00:00:00:00:00:00:00:01-00",
},
"3": {
"id": "Switch 1 id",
"name": "Switch 1",
"type": "ZHASwitch",
"state": {"buttonevent": 1000},
"config": {},
"uniqueid": "00:00:00:00:00:00:00:02-00",
},
"4": {
"id": "Switch 2 id",
"name": "Switch 2",
"type": "ZHASwitch",
"state": {"buttonevent": 1000},
"config": {"battery": 100},
"uniqueid": "00:00:00:00:00:00:00:03-00",
},
"5": {
"id": "Daylight sensor id",
"name": "Daylight sensor",
"type": "Daylight",
"state": {"daylight": True, "status": 130},
"config": {},
"uniqueid": "00:00:00:00:00:00:00:04-00",
},
"6": {
"id": "Power sensor id",
"name": "Power sensor",
"type": "ZHAPower",
"state": {"current": 2, "power": 6, "voltage": 3},
"config": {"reachable": True},
"uniqueid": "00:00:00:00:00:00:00:05-00",
},
"7": {
"id": "Consumption id",
"name": "Consumption sensor",
"type": "ZHAConsumption",
"state": {"consumption": 2, "power": 6},
"config": {"reachable": True},
"uniqueid": "00:00:00:00:00:00:00:06-00",
},
"8": {
"id": "CLIP light sensor id",
"name": "CLIP light level sensor",
"type": "CLIPLightLevel",
"state": {"lightlevel": 30000},
"config": {"reachable": True},
"uniqueid": "00:00:00:00:00:00:00:07-00",
},
}
async def test_no_sensors(hass, aioclient_mock):
"""Test that no sensors in deconz results in no sensor entities."""
await setup_deconz_integration(hass, aioclient_mock)
assert len(hass.states.async_all()) == 0
async def test_sensors(hass, aioclient_mock):
"""Test successful creation of sensor entities."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = deepcopy(SENSORS)
config_entry = await setup_deconz_integration(
hass, aioclient_mock, get_state_response=data
)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert len(hass.states.async_all()) == 5
light_level_sensor = hass.states.get("sensor.light_level_sensor")
assert light_level_sensor.state == "999.8"
assert light_level_sensor.attributes["device_class"] == DEVICE_CLASS_ILLUMINANCE
assert hass.states.get("sensor.presence_sensor") is None
assert hass.states.get("sensor.switch_1") is None
assert hass.states.get("sensor.switch_1_battery_level") is None
assert hass.states.get("sensor.switch_2") is None
switch_2_battery_level = hass.states.get("sensor.switch_2_battery_level")
assert switch_2_battery_level.state == "100"
assert switch_2_battery_level.attributes["device_class"] == DEVICE_CLASS_BATTERY
assert hass.states.get("sensor.daylight_sensor") is None
power_sensor = hass.states.get("sensor.power_sensor")
assert power_sensor.state == "6"
assert power_sensor.attributes["device_class"] == DEVICE_CLASS_POWER
consumption_sensor = hass.states.get("sensor.consumption_sensor")
assert consumption_sensor.state == "0.002"
assert "device_class" not in consumption_sensor.attributes
assert hass.states.get("sensor.clip_light_level_sensor") is None
# Event signals new light level
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "1",
"state": {"lightlevel": 2000},
}
gateway.api.event_handler(state_changed_event)
assert hass.states.get("sensor.light_level_sensor").state == "1.6"
# Event signals new battery level
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "4",
"config": {"battery": 75},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("sensor.switch_2_battery_level").state == "75"
await hass.config_entries.async_unload(config_entry.entry_id)
states = hass.states.async_all()
assert len(hass.states.async_all()) == 5
for state in states:
assert state.state == STATE_UNAVAILABLE
await hass.config_entries.async_remove(config_entry.entry_id)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 0
async def test_allow_clip_sensors(hass, aioclient_mock):
"""Test that CLIP sensors can be allowed."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = deepcopy(SENSORS)
config_entry = await setup_deconz_integration(
hass,
aioclient_mock,
options={CONF_ALLOW_CLIP_SENSOR: True},
get_state_response=data,
)
assert len(hass.states.async_all()) == 6
assert hass.states.get("sensor.clip_light_level_sensor").state == "999.8"
# Disallow clip sensors
hass.config_entries.async_update_entry(
config_entry, options={CONF_ALLOW_CLIP_SENSOR: False}
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 5
assert hass.states.get("sensor.clip_light_level_sensor") is None
# Allow clip sensors
hass.config_entries.async_update_entry(
config_entry, options={CONF_ALLOW_CLIP_SENSOR: True}
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 6
assert hass.states.get("sensor.clip_light_level_sensor")
async def test_add_new_sensor(hass, aioclient_mock):
"""Test that adding a new sensor works."""
config_entry = await setup_deconz_integration(hass, aioclient_mock)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert len(hass.states.async_all()) == 0
state_added_event = {
"t": "event",
"e": "added",
"r": "sensors",
"id": "1",
"sensor": deepcopy(SENSORS["1"]),
}
gateway.api.event_handler(state_added_event)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert hass.states.get("sensor.light_level_sensor").state == "999.8"
async def test_add_battery_later(hass, aioclient_mock):
"""Test that a sensor without an initial battery state creates a battery sensor once state exist."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = {"1": deepcopy(SENSORS["3"])}
config_entry = await setup_deconz_integration(
hass, aioclient_mock, get_state_response=data
)
gateway = get_gateway_from_config_entry(hass, config_entry)
remote = gateway.api.sensors["1"]
assert len(hass.states.async_all()) == 0
assert len(gateway.events) == 1
assert len(remote._callbacks) == 2 # Event and battery tracker
remote.update({"config": {"battery": 50}})
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert len(gateway.events) == 1
assert len(remote._callbacks) == 2 # Event and battery entity
assert hass.states.get("sensor.switch_1_battery_level")
async def test_special_danfoss_battery_creation(hass, aioclient_mock):
"""Test the special Danfoss battery creation works.
Normally there should only be one battery sensor per device from deCONZ.
With specific Danfoss devices each endpoint can report its own battery state.
"""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = {
"1": {
"config": {
"battery": 70,
"heatsetpoint": 2300,
"offset": 0,
"on": True,
"reachable": True,
"schedule": {},
"schedule_on": False,
},
"ep": 1,
"etag": "982d9acc38bee5b251e24a9be26558e4",
"lastseen": "2021-02-15T12:23Z",
"manufacturername": "Danfoss",
"modelid": "0x8030",
"name": "0x8030",
"state": {
"lastupdated": "2021-02-15T12:23:07.994",
"on": False,
"temperature": 2307,
},
"swversion": "YYYYMMDD",
"type": "ZHAThermostat",
"uniqueid": "58:8e:81:ff:fe:00:11:22-01-0201",
},
"2": {
"config": {
"battery": 86,
"heatsetpoint": 2300,
"offset": 0,
"on": True,
"reachable": True,
"schedule": {},
"schedule_on": False,
},
"ep": 2,
"etag": "62f12749f9f51c950086aff37dd02b61",
"lastseen": "2021-02-15T12:23Z",
"manufacturername": "Danfoss",
"modelid": "0x8030",
"name": "0x8030",
"state": {
"lastupdated": "2021-02-15T12:23:22.399",
"on": False,
"temperature": 2316,
},
"swversion": "YYYYMMDD",
"type": "ZHAThermostat",
"uniqueid": "58:8e:81:ff:fe:00:11:22-02-0201",
},
"3": {
"config": {
"battery": 86,
"heatsetpoint": 2350,
"offset": 0,
"on": True,
"reachable": True,
"schedule": {},
"schedule_on": False,
},
"ep": 3,
"etag": "f50061174bb7f18a3d95789bab8b646d",
"lastseen": "2021-02-15T12:23Z",
"manufacturername": "Danfoss",
"modelid": "0x8030",
"name": "0x8030",
"state": {
"lastupdated": "2021-02-15T12:23:25.466",
"on": False,
"temperature": 2337,
},
"swversion": "YYYYMMDD",
"type": "ZHAThermostat",
"uniqueid": "58:8e:81:ff:fe:00:11:22-03-0201",
},
"4": {
"config": {
"battery": 85,
"heatsetpoint": 2300,
"offset": 0,
"on": True,
"reachable": True,
"schedule": {},
"schedule_on": False,
},
"ep": 4,
"etag": "eea97adf8ce1b971b8b6a3a31793f96b",
"lastseen": "2021-02-15T12:23Z",
"manufacturername": "Danfoss",
"modelid": "0x8030",
"name": "0x8030",
"state": {
"lastupdated": "2021-02-15T12:23:41.939",
"on": False,
"temperature": 2333,
},
"swversion": "YYYYMMDD",
"type": "ZHAThermostat",
"uniqueid": "58:8e:81:ff:fe:00:11:22-04-0201",
},
"5": {
"config": {
"battery": 83,
"heatsetpoint": 2300,
"offset": 0,
"on": True,
"reachable": True,
"schedule": {},
"schedule_on": False,
},
"ep": 5,
"etag": "1f7cd1a5d66dc27ac5eb44b8c47362fb",
"lastseen": "2021-02-15T12:23Z",
"manufacturername": "Danfoss",
"modelid": "0x8030",
"name": "0x8030",
"state": {"lastupdated": "none", "on": False, "temperature": 2325},
"swversion": "YYYYMMDD",
"type": "ZHAThermostat",
"uniqueid": "58:8e:81:ff:fe:00:11:22-05-0201",
},
}
await setup_deconz_integration(hass, aioclient_mock, get_state_response=data)
assert len(hass.states.async_all()) == 10
assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 5
async def test_air_quality_sensor(hass, aioclient_mock):
"""Test successful creation of air quality sensor entities."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = {
"0": {
"config": {"on": True, "reachable": True},
"ep": 2,
"etag": "c2d2e42396f7c78e11e46c66e2ec0200",
"lastseen": "2020-11-20T22:48Z",
"manufacturername": "BOSCH",
"modelid": "AIR",
"name": "Air quality",
"state": {
"airquality": "poor",
"airqualityppb": 809,
"lastupdated": "2020-11-20T22:48:00.209",
},
"swversion": "20200402",
"type": "ZHAAirQuality",
"uniqueid": "00:12:4b:00:14:4d:00:07-02-fdef",
}
}
await setup_deconz_integration(hass, aioclient_mock, get_state_response=data)
assert len(hass.states.async_all()) == 1
air_quality = hass.states.get("sensor.air_quality")
assert air_quality.state == "poor"
async def test_time_sensor(hass, aioclient_mock):
"""Test successful creation of time sensor entities."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = {
"0": {
"config": {"battery": 40, "on": True, "reachable": True},
"ep": 1,
"etag": "28e796678d9a24712feef59294343bb6",
"lastseen": "2020-11-22T11:26Z",
"manufacturername": "Danfoss",
"modelid": "eTRV0100",
"name": "Time",
"state": {
"lastset": "2020-11-19T08:07:08Z",
"lastupdated": "2020-11-22T10:51:03.444",
"localtime": "2020-11-22T10:51:01",
"utc": "2020-11-22T10:51:01Z",
},
"swversion": "20200429",
"type": "ZHATime",
"uniqueid": "cc:cc:cc:ff:fe:38:4d:b3-01-000a",
}
}
await setup_deconz_integration(hass, aioclient_mock, get_state_response=data)
assert len(hass.states.async_all()) == 2
time = hass.states.get("sensor.time")
assert time.state == "2020-11-19T08:07:08Z"
time_battery = hass.states.get("sensor.time_battery_level")
assert time_battery.state == "40"
async def test_unsupported_sensor(hass, aioclient_mock):
"""Test that unsupported sensors doesn't break anything."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = {
"0": {"type": "not supported", "name": "name", "state": {}, "config": {}}
}
await setup_deconz_integration(hass, aioclient_mock, get_state_response=data)
assert len(hass.states.async_all()) == 1
unsupported_sensor = hass.states.get("sensor.name")
assert unsupported_sensor.state == "unknown"
|
the-stack_0_13239 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.north.plugins.storage.storage_base import StorageBase
from calvin.utilities import calvinlogger
from calvin.utilities import calvinconfig
from calvin.utilities.calvin_callback import CalvinCB
from calvin.utilities import calvinuuid
_conf = calvinconfig.get()
_log = calvinlogger.get_logger(__name__)
class StorageProxy(StorageBase):
""" Implements a storage that asks a master node, this is the client class"""
def __init__(self, node):
self.master_uri = _conf.get(None, 'storage_proxy')
self.node = node
self.tunnel = None
self.replies = {}
_log.debug("PROXY init for %s", self.master_uri)
super(StorageProxy, self).__init__()
def start(self, iface='', network='', bootstrap=[], cb=None, name=None):
"""
Starts the service if its needed for the storage service
cb is the callback called when the start is finished
"""
_log.debug("PROXY start")
self.node.network.join([self.master_uri], CalvinCB(self._start_link_cb, org_cb=cb))
def _start_link_cb(self, status, uri, peer_node_id, org_cb):
_log.analyze(self.node.id, "+", {'status': str(status)}, peer_node_id=peer_node_id)
if status == "NACK":
if org_cb:
org_cb(False)
return
# Got link set up tunnel
self.master_id = peer_node_id
self.tunnel = self.node.proto.tunnel_new(self.master_id, 'storage', {})
self.tunnel.register_tunnel_down(CalvinCB(self.tunnel_down, org_cb=org_cb))
self.tunnel.register_tunnel_up(CalvinCB(self.tunnel_up, org_cb=org_cb))
self.tunnel.register_recv(self.tunnel_recv_handler)
def tunnel_down(self, org_cb):
""" Callback that the tunnel is not accepted or is going down """
if not self.tunnel:
return True
_log.analyze(self.node.id, "+ CLIENT", {'tunnel_id': self.tunnel.id})
self.tunnel = None
# FIXME assumes that the org_cb is the callback given by storage when starting, can only be called once
# not future up/down
if org_cb:
org_cb(False)
# We should always return True which sends an ACK on the destruction of the tunnel
return True
def tunnel_up(self, org_cb):
""" Callback that the tunnel is working """
if not self.tunnel:
return True
_log.analyze(self.node.id, "+ CLIENT", {'tunnel_id': self.tunnel.id})
# FIXME assumes that the org_cb is the callback given by storage when starting, can only be called once
# not future up/down
if org_cb:
org_cb(True)
# We should always return True which sends an ACK on the destruction of the tunnel
return True
def tunnel_recv_handler(self, payload):
""" Gets called when a storage master replies"""
_log.analyze(self.node.id, "+ CLIENT", {'payload': payload})
if 'msg_uuid' in payload and payload['msg_uuid'] in self.replies and 'cmd' in payload and payload['cmd']=='REPLY':
self.replies.pop(payload['msg_uuid'])(**{k: v for k, v in payload.iteritems() if k in ('key', 'value')})
def send(self, cmd, msg, cb):
msg_id = calvinuuid.uuid("MSGID")
self.replies[msg_id] = cb
msg['msg_uuid'] = msg_id
self.tunnel.send(dict(msg, cmd=cmd, msg_uuid=msg_id))
def set(self, key, value, cb=None):
"""
Set a key, value pair in the storage
"""
_log.analyze(self.node.id, "+ CLIENT", {'key': key, 'value': value})
self.send(cmd='SET',msg={'key':key, 'value': value}, cb=cb)
def get(self, key, cb=None):
"""
Gets a value from the storage
"""
_log.analyze(self.node.id, "+ CLIENT", {'key': key})
self.send(cmd='GET',msg={'key':key}, cb=cb)
def get_concat(self, key, cb=None):
"""
Gets a value from the storage
"""
_log.analyze(self.node.id, "+ CLIENT", {'key': key})
self.send(cmd='GET_CONCAT',msg={'key':key}, cb=cb)
def append(self, key, value, cb=None):
_log.analyze(self.node.id, "+ CLIENT", {'key': key, 'value': value})
self.send(cmd='APPEND',msg={'key':key, 'value': value}, cb=cb)
def remove(self, key, value, cb=None):
_log.analyze(self.node.id, "+ CLIENT", {'key': key, 'value': value})
self.send(cmd='REMOVE',msg={'key':key, 'value': value}, cb=cb)
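    # Example usage (illustrative sketch only; the key/value pair and the
    # callbacks are hypothetical, and a configured 'storage_proxy' master is
    # assumed to be reachable):
    #
    #     proxy = StorageProxy(node)
    #     proxy.start(cb=CalvinCB(lambda value: _log.info("proxy started: %s", value)))
    #     proxy.set("actor-123", {"state": "running"},
    #               cb=CalvinCB(lambda key, value: _log.info("stored %s", key)))
    #     # Each request carries a generated msg_uuid; the master's REPLY is
    #     # matched back to the stored callback in tunnel_recv_handler.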
def bootstrap(self, addrs, cb=None):
_log.analyze(self.node.id, "+ CLIENT", None)
def stop(self, cb=None):
_log.analyze(self.node.id, "+ CLIENT", None)
if cb:
cb() |
the-stack_0_13241 | # Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For further info, check https://github.com/canonical/charmcraft
"""Tests for the commands infraestructure."""
import pytest
from charmcraft.cmdbase import CommandError, BaseCommand
from charmcraft.main import COMMAND_GROUPS
def test_commanderror_retcode_default():
"""The CommandError return code default."""
err = CommandError('problem')
assert err.retcode == 1
def test_commanderror_retcode_given():
"""The CommandError holds the return code."""
err = CommandError('problem', retcode=4)
assert err.retcode == 4
all_commands = list.__add__(*[commands for _, _, commands in COMMAND_GROUPS])
@pytest.mark.parametrize('command', all_commands)
@pytest.mark.parametrize('attrib', ['name', 'help_msg', 'overview'])
def test_basecommand_mandatory_attributes(command, attrib):
"""All commands must provide the mandatory attributes."""
assert getattr(command, attrib) is not None
def test_basecommand_holds_the_indicated_group():
"""BaseCommand subclasses ."""
class TestClass(BaseCommand):
help_msg = 'help message'
name = 'test'
group = 'test group'
tc = TestClass(group)
assert tc.group == group
def test_basecommand_fill_parser_optional():
"""BaseCommand subclasses are allowed to not override fill_parser."""
class TestClass(BaseCommand):
help_msg = 'help message'
name = 'test'
def __init__(self, group):
self.done = False
super().__init__(group)
def run(self, parsed_args):
self.done = True
tc = TestClass('group')
tc.run([])
assert tc.done
def test_basecommand_run_mandatory():
"""BaseCommand subclasses must override run."""
class TestClass(BaseCommand):
help_msg = 'help message'
name = 'test'
tc = TestClass('group')
with pytest.raises(NotImplementedError):
tc.run([])
|
the-stack_0_13242 | # (c) Copyright [2018-2021] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# |_ |~) _ _| _ /~\ _ |.
# |_)\/ |_)(_|(_|| \_/|_|(_|||
# /
# ____________ ______
# / __ `\ / /
# | \/ / / /
# |______ / / /
# |____/ / /
# _____________ / /
# \ / / /
# \ / / /
# \_______/ / /
# ______ / /
# \ / / /
# \ / / /
# \/ / /
# / /
# / /
# \ /
# \ /
# \/
# _
# \ / _ __|_. _ _ |_)
# \/ (/_| | |(_(_|| \/
# /
# VerticaPy is a Python library with scikit-like functionality to use to conduct
# data science projects on data stored in Vertica, taking advantage Vertica’s
# speed and built-in analytics and machine learning features. It supports the
# entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize
# data transformation operations, and offers beautiful graphical options.
#
# VerticaPy aims to solve all of these problems. The idea is simple: instead
# of moving data around for processing, VerticaPy brings the logic to the data.
#
#
# Modules
#
# Standard Python Modules
import math, decimal, datetime
from typing import Union
# Other Python Modules
from scipy.stats import chi2, norm, f
import numpy as np
# VerticaPy Modules
import verticapy
from verticapy.utilities import *
from verticapy.toolbox import *
from verticapy.learn.linear_model import LinearRegression
from verticapy import vDataFrame
# Statistical Tests & Tools
# ---#
def adfuller(
vdf: vDataFrame,
column: str,
ts: str,
by: list = [],
p: int = 1,
with_trend: bool = False,
regresults: bool = False,
):
"""
---------------------------------------------------------------------------
Augmented Dickey Fuller test (Time Series stationarity).
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
column: str
Input vcolumn to test.
ts: str
        vcolumn used as timeline. It will be used to order the data. It can be
        a numerical or date-like (date, datetime, timestamp...) vcolumn.
by: list, optional
vcolumns used in the partition.
p: int, optional
Number of lags to consider in the test.
with_trend: bool, optional
Adds a trend in the Regression.
regresults: bool, optional
If True, the full regression results are returned.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
def critical_value(alpha, N, with_trend):
if not (with_trend):
if N <= 25:
if alpha == 0.01:
return -3.75
elif alpha == 0.10:
return -2.62
elif alpha == 0.025:
return -3.33
else:
return -3.00
elif N <= 50:
if alpha == 0.01:
return -3.58
elif alpha == 0.10:
return -2.60
elif alpha == 0.025:
return -3.22
else:
return -2.93
elif N <= 100:
if alpha == 0.01:
return -3.51
elif alpha == 0.10:
return -2.58
elif alpha == 0.025:
return -3.17
else:
return -2.89
elif N <= 250:
if alpha == 0.01:
return -3.46
elif alpha == 0.10:
return -2.57
elif alpha == 0.025:
return -3.14
else:
return -2.88
elif N <= 500:
if alpha == 0.01:
return -3.44
elif alpha == 0.10:
return -2.57
elif alpha == 0.025:
return -3.13
else:
return -2.87
else:
if alpha == 0.01:
return -3.43
elif alpha == 0.10:
return -2.57
elif alpha == 0.025:
return -3.12
else:
return -2.86
else:
if N <= 25:
if alpha == 0.01:
return -4.38
elif alpha == 0.10:
return -3.24
elif alpha == 0.025:
return -3.95
else:
return -3.60
elif N <= 50:
if alpha == 0.01:
return -4.15
elif alpha == 0.10:
return -3.18
elif alpha == 0.025:
return -3.80
else:
return -3.50
elif N <= 100:
if alpha == 0.01:
return -4.04
elif alpha == 0.10:
return -3.15
elif alpha == 0.025:
return -3.73
else:
                    return -3.45
elif N <= 250:
if alpha == 0.01:
return -3.99
elif alpha == 0.10:
return -3.13
elif alpha == 0.025:
return -3.69
else:
return -3.43
elif N <= 500:
if alpha == 0.01:
                    return -3.98
elif alpha == 0.10:
return -3.13
elif alpha == 0.025:
return -3.68
else:
return -3.42
else:
if alpha == 0.01:
return -3.96
elif alpha == 0.10:
return -3.12
elif alpha == 0.025:
return -3.66
else:
return -3.41
check_types(
[
("ts", ts, [str],),
("column", column, [str],),
("p", p, [int, float],),
("by", by, [list],),
("with_trend", with_trend, [bool],),
("regresults", regresults, [bool],),
("vdf", vdf, [vDataFrame,],),
],
)
columns_check([ts, column] + by, vdf)
ts = vdf_columns_names([ts], vdf)[0]
column = vdf_columns_names([column], vdf)[0]
by = vdf_columns_names(by, vdf)
schema = vdf._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema):
schema = "public"
name = "{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
schema, gen_name([column]).upper()
)
relation_name = "{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
schema, gen_name([column]).upper()
)
try:
vdf._VERTICAPY_VARIABLES_["cursor"].execute(
"DROP MODEL IF EXISTS {}".format(name)
)
vdf._VERTICAPY_VARIABLES_["cursor"].execute(
"DROP VIEW IF EXISTS {}".format(relation_name)
)
except:
pass
lag = [
"LAG({}, 1) OVER ({}ORDER BY {}) AS lag1".format(
column, "PARTITION BY {}".format(", ".join(by)) if (by) else "", ts
)
]
lag += [
"LAG({}, {}) OVER ({}ORDER BY {}) - LAG({}, {}) OVER ({}ORDER BY {}) AS delta{}".format(
column,
i,
"PARTITION BY {}".format(", ".join(by)) if (by) else "",
ts,
column,
i + 1,
"PARTITION BY {}".format(", ".join(by)) if (by) else "",
ts,
i,
)
for i in range(1, p + 1)
]
lag += [
"{} - LAG({}, 1) OVER ({}ORDER BY {}) AS delta".format(
column, column, "PARTITION BY {}".format(", ".join(by)) if (by) else "", ts
)
]
query = "CREATE VIEW {} AS SELECT {}, {} AS ts FROM {}".format(
relation_name,
", ".join(lag),
"TIMESTAMPDIFF(SECOND, {}, MIN({}) OVER ())".format(ts, ts)
if vdf[ts].isdate()
else ts,
vdf.__genSQL__(),
)
vdf._VERTICAPY_VARIABLES_["cursor"].execute(query)
model = LinearRegression(
name, vdf._VERTICAPY_VARIABLES_["cursor"], solver="Newton", max_iter=1000
)
predictors = ["lag1"] + ["delta{}".format(i) for i in range(1, p + 1)]
if with_trend:
predictors += ["ts"]
model.fit(
relation_name, predictors, "delta",
)
coef = model.coef_
vdf._VERTICAPY_VARIABLES_["cursor"].execute("DROP MODEL IF EXISTS {}".format(name))
vdf._VERTICAPY_VARIABLES_["cursor"].execute(
"DROP VIEW IF EXISTS {}".format(relation_name)
)
if regresults:
return coef
coef = coef.transpose()
DF = coef.values["lag1"][0] / (max(coef.values["lag1"][1], 1e-99))
p_value = coef.values["lag1"][3]
count = vdf.shape()[0]
result = tablesample(
{
"index": [
"ADF Test Statistic",
"p_value",
"# Lags used",
"# Observations Used",
"Critical Value (1%)",
"Critical Value (2.5%)",
"Critical Value (5%)",
"Critical Value (10%)",
"Stationarity (alpha = 1%)",
],
"value": [
DF,
p_value,
p,
count,
critical_value(0.01, count, with_trend),
critical_value(0.025, count, with_trend),
critical_value(0.05, count, with_trend),
critical_value(0.10, count, with_trend),
DF < critical_value(0.01, count, with_trend) and p_value < 0.01,
],
}
)
return result
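# Example usage (illustrative sketch; assumes an existing vDataFrame `vdf` with
# a numeric column "sales" and a date column "date" -- both names are
# hypothetical):
#
#     result = adfuller(vdf, column="sales", ts="date", p=3, with_trend=True)
#     # The last row, "Stationarity (alpha = 1%)", is True when the ADF
#     # statistic falls below the 1% critical value and the p-value is < 0.01.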
# ---#
def cochrane_orcutt(
model, vdf: Union[vDataFrame, str], ts: str, prais_winsten: bool = False, drop_tmp_model: bool = True,
):
"""
---------------------------------------------------------------------------
Performs a Cochrane-Orcutt estimation.
Parameters
----------
model: vModel
Linear regression object.
vdf: vDataFrame / str
Input relation.
ts: str
vcolumn of numeric or date-like type (date, datetime, timestamp, etc.)
used as the timeline and to order the data.
prais_winsten: bool, optional
If true, retains the first observation of the time series, increasing
precision and efficiency. This configuration is called the
Prais–Winsten estimation.
drop_tmp_model: bool, optional
If true, it drops the temporary model.
Returns
-------
model
A Linear Model with the different information stored as attributes:
- coef_ : Model's coefficients.
- pho_ : Cochrane-Orcutt pho.
- anova_table_ : ANOVA table.
- r2_ : R2
"""
check_types(
[("vdf", vdf, [vDataFrame, str,],),
("ts", ts, [vDataFrame, str,],),
("drop_tmp_model", drop_tmp_model, [bool,],),],
)
if isinstance(vdf, str):
vdf_tmp = vdf_from_relation(vdf, cursor=model.cursor)
else:
vdf_tmp = vdf.copy()
columns_check([ts], vdf_tmp)
schema, relation = schema_relation(model.name)
name = schema + ".VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
get_session(model.cursor)
)
param = model.get_params()
model_tmp = type(model)(name, model.cursor)
model_tmp.set_params(param)
X, y = model.X, model.y
print_info = verticapy.options["print_info"]
verticapy.options["print_info"] = False
if prais_winsten:
vdf_tmp = vdf_tmp[X + [y, ts]].dropna()
verticapy.options["print_info"] = print_info
prediction_name = "prediction_{}".format(get_session(vdf._VERTICAPY_VARIABLES_["cursor"]))
eps_name = "eps_{}".format(get_session(vdf._VERTICAPY_VARIABLES_["cursor"]))
model.predict(vdf_tmp, X=X, name=prediction_name,)
vdf_tmp[eps_name] = vdf_tmp[y] - vdf_tmp[prediction_name]
query = "SELECT SUM(num) / SUM(den) FROM (SELECT {} * LAG({}) OVER (ORDER BY {}) AS num, POWER({}, 2) AS den FROM {}) x".format(eps_name, eps_name, ts, eps_name, vdf_tmp.__genSQL__())
    vdf_tmp.__executeSQL__(
query,
title="Computes the Cochrane Orcutt pho.",
)
pho = vdf_tmp._VERTICAPY_VARIABLES_["cursor"].fetchone()[0]
for elem in X + [y]:
new_val = "{} - {} * LAG({}) OVER (ORDER BY {})".format(elem, pho, elem, ts)
if prais_winsten:
new_val = "COALESCE({}, {} * {})".format(new_val, elem, (1 - pho ** 2) ** (0.5))
vdf_tmp[elem] = new_val
model_tmp.drop()
model_tmp.fit(vdf_tmp, X, y)
model_tmp.pho_ = pho
model_tmp.anova_table_ = model.regression_report("anova")
model_tmp.r2_ = model.score("r2")
if drop_tmp_model:
model_tmp.drop()
return model_tmp
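# Example usage (illustrative sketch; assumes a fitted LinearRegression `model`
# and a relation with a date column "date" -- the names are hypothetical):
#
#     corrected = cochrane_orcutt(model, "public.sales", ts="date",
#                                 prais_winsten=True)
#     # corrected.coef_ holds the re-estimated coefficients, corrected.pho_
#     # the estimated autocorrelation of the residuals.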
# ---#
def durbin_watson(
vdf: vDataFrame, eps: str, ts: str, by: list = [],
):
"""
---------------------------------------------------------------------------
Durbin Watson test (residuals autocorrelation).
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
eps: str
Input residual vcolumn.
ts: str
        vcolumn used as timeline. It will be used to order the data. It can be
        a numerical or date-like (date, datetime, timestamp...) vcolumn.
by: list, optional
vcolumns used in the partition.
Returns
-------
float
Durbin Watson statistic
"""
check_types(
[
("ts", ts, [str],),
("eps", eps, [str],),
("by", by, [list],),
("vdf", vdf, [vDataFrame, str,],),
],
)
columns_check([eps] + [ts] + by, vdf)
eps = vdf_columns_names([eps], vdf)[0]
ts = vdf_columns_names([ts], vdf)[0]
by = vdf_columns_names(by, vdf)
query = "(SELECT et, LAG(et) OVER({}ORDER BY {}) AS lag_et FROM (SELECT {} AS et, {}{} FROM {}) VERTICAPY_SUBTABLE) VERTICAPY_SUBTABLE".format(
"PARTITION BY {} ".format(", ".join(by)) if (by) else "",
ts,
eps,
ts,
(", " + ", ".join(by)) if (by) else "",
vdf.__genSQL__(),
)
vdf.__executeSQL__(
"SELECT SUM(POWER(et - lag_et, 2)) / SUM(POWER(et, 2)) FROM {}".format(query),
title="Computes the Durbin Watson d.",
)
d = vdf._VERTICAPY_VARIABLES_["cursor"].fetchone()[0]
return d
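# Example usage (illustrative sketch; assumes a vDataFrame `vdf` that already
# stores the model residuals in a column "eps" and has a date column "date" --
# the names are hypothetical):
#
#     d = durbin_watson(vdf, eps="eps", ts="date")
#     # d close to 2 suggests no first-order autocorrelation of the residuals;
#     # values toward 0 or 4 point to positive or negative autocorrelation.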
# ---#
def endogtest(
vdf: vDataFrame, eps: str, X: list,
):
"""
---------------------------------------------------------------------------
Endogeneity test.
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
eps: str
Input residual vcolumn.
X: list
Input Variables to test the endogeneity on.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[("eps", eps, [str],), ("X", X, [list],), ("vdf", vdf, [vDataFrame, str,],),],
)
columns_check([eps] + X, vdf)
eps = vdf_columns_names([eps], vdf)[0]
X = vdf_columns_names(X, vdf)
from verticapy.learn.linear_model import LinearRegression
schema_writing = vdf._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema_writing):
schema_writing = "public"
name = schema_writing + ".VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
get_session(vdf._VERTICAPY_VARIABLES_["cursor"])
)
model = LinearRegression(name, cursor=vdf._VERTICAPY_VARIABLES_["cursor"])
try:
model.fit(vdf, X, eps)
R2 = model.score("r2")
model.drop()
except:
try:
model.set_params({"solver": "bfgs"})
model.fit(vdf, X, eps)
R2 = model.score("r2")
model.drop()
except:
model.drop()
raise
n = vdf.shape()[0]
k = len(X)
LM = n * R2
lm_pvalue = chi2.sf(LM, k)
F = (n - k - 1) * R2 / (1 - R2) / k
f_pvalue = f.sf(F, k, n - k - 1)
result = tablesample(
{
"index": [
"Lagrange Multiplier Statistic",
"lm_p_value",
"F Value",
"f_p_value",
],
"value": [LM, lm_pvalue, F, f_pvalue],
}
)
return result
# ---#
def het_arch(
vdf: vDataFrame, eps: str, ts: str, by: list = [], p: int = 1,
):
"""
---------------------------------------------------------------------------
Engle’s Test for Autoregressive Conditional Heteroscedasticity (ARCH).
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
eps: str
Input residual vcolumn.
ts: str
        vcolumn used as timeline. It will be used to order the data. It can be
        a numerical or date-like (date, datetime, timestamp...) vcolumn.
by: list, optional
vcolumns used in the partition.
p: int, optional
Number of lags to consider in the test.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[
("eps", eps, [str],),
("ts", ts, [str],),
("p", p, [int, float],),
("vdf", vdf, [vDataFrame, str,],),
],
)
columns_check([eps, ts] + by, vdf)
eps = vdf_columns_names([eps], vdf)[0]
ts = vdf_columns_names([ts], vdf)[0]
by = vdf_columns_names(by, vdf)
X = []
X_names = []
for i in range(0, p + 1):
X += [
"LAG(POWER({}, 2), {}) OVER({}ORDER BY {}) AS lag_{}".format(
eps, i, ("PARTITION BY " + ", ".join(by)) if (by) else "", ts, i
)
]
X_names += ["lag_{}".format(i)]
query = "(SELECT {} FROM {}) VERTICAPY_SUBTABLE".format(
", ".join(X), vdf.__genSQL__()
)
vdf_lags = vdf_from_relation(query, cursor=vdf._VERTICAPY_VARIABLES_["cursor"])
from verticapy.learn.linear_model import LinearRegression
schema_writing = vdf._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema_writing):
schema_writing = "public"
name = schema_writing + ".VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
get_session(vdf._VERTICAPY_VARIABLES_["cursor"])
)
model = LinearRegression(name, cursor=vdf._VERTICAPY_VARIABLES_["cursor"])
try:
model.fit(vdf_lags, X_names[1:], X_names[0])
R2 = model.score("r2")
model.drop()
except:
try:
model.set_params({"solver": "bfgs"})
model.fit(vdf_lags, X_names[1:], X_names[0])
R2 = model.score("r2")
model.drop()
except:
model.drop()
raise
n = vdf.shape()[0]
k = len(X)
LM = (n - p) * R2
lm_pvalue = chi2.sf(LM, p)
F = (n - 2 * p - 1) * R2 / (1 - R2) / p
f_pvalue = f.sf(F, p, n - 2 * p - 1)
result = tablesample(
{
"index": [
"Lagrange Multiplier Statistic",
"lm_p_value",
"F Value",
"f_p_value",
],
"value": [LM, lm_pvalue, F, f_pvalue],
}
)
return result
# ---#
def het_breuschpagan(
vdf: vDataFrame, eps: str, X: list,
):
"""
---------------------------------------------------------------------------
Uses the Breusch-Pagan to test a model for heteroskedasticity.
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
eps: str
Input residual vColumn.
X: list
The exogenous variables to test.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[("eps", eps, [str],), ("X", X, [list],), ("vdf", vdf, [vDataFrame, str,],),],
)
columns_check([eps] + X, vdf)
eps = vdf_columns_names([eps], vdf)[0]
X = vdf_columns_names(X, vdf)
from verticapy.learn.linear_model import LinearRegression
schema_writing = vdf._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema_writing):
schema_writing = "public"
name = schema_writing + ".VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
get_session(vdf._VERTICAPY_VARIABLES_["cursor"])
)
model = LinearRegression(name, cursor=vdf._VERTICAPY_VARIABLES_["cursor"])
vdf_copy = vdf.copy()
vdf_copy["VERTICAPY_TEMP_eps2"] = vdf_copy[eps] ** 2
try:
model.fit(vdf_copy, X, "VERTICAPY_TEMP_eps2")
R2 = model.score("r2")
model.drop()
except:
try:
model.set_params({"solver": "bfgs"})
model.fit(vdf_copy, X, "VERTICAPY_TEMP_eps2")
R2 = model.score("r2")
model.drop()
except:
model.drop()
raise
n = vdf.shape()[0]
k = len(X)
LM = n * R2
lm_pvalue = chi2.sf(LM, k)
F = (n - k - 1) * R2 / (1 - R2) / k
f_pvalue = f.sf(F, k, n - k - 1)
result = tablesample(
{
"index": [
"Lagrange Multiplier Statistic",
"lm_p_value",
"F Value",
"f_p_value",
],
"value": [LM, lm_pvalue, F, f_pvalue],
}
)
return result
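# Example usage (illustrative sketch; assumes a vDataFrame `vdf` with a residual
# column "eps" and predictor columns "x1" and "x2" -- the names are
# hypothetical):
#
#     result = het_breuschpagan(vdf, eps="eps", X=["x1", "x2"])
#     # Small lm_p_value / f_p_value values suggest heteroskedasticity of the
#     # residuals with respect to the listed predictors.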
# ---#
def het_goldfeldquandt(
vdf: vDataFrame, y: str, X: list, idx: int = 0, split: float = 0.5
):
"""
---------------------------------------------------------------------------
Goldfeld-Quandt homoscedasticity test.
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
y: str
Response Column.
X: list
Exogenous Variables.
idx: int, optional
Column index of variable according to which observations are sorted
for the split.
split: float, optional
Float to indicate where to split (Example: 0.5 to split on the median).
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
def model_fit(input_relation, X, y, model):
var = []
for vdf_tmp in input_relation:
model.drop()
model.fit(vdf_tmp, X, y)
model.predict(vdf_tmp, name="verticapy_prediction")
vdf_tmp["residual_0"] = vdf_tmp[y] - vdf_tmp["verticapy_prediction"]
var += [vdf_tmp["residual_0"].var()]
model.drop()
return var
check_types(
[
("y", y, [str],),
("X", X, [list],),
("idx", idx, [int, float],),
("split", split, [int, float],),
("vdf", vdf, [vDataFrame, str,],),
],
)
columns_check([y] + X, vdf)
y = vdf_columns_names([y], vdf)[0]
X = vdf_columns_names(X, vdf)
split_value = vdf[X[idx]].quantile(split)
vdf_0_half = vdf.search(vdf[X[idx]] < split_value)
vdf_1_half = vdf.search(vdf[X[idx]] > split_value)
from verticapy.learn.linear_model import LinearRegression
schema_writing = vdf._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema_writing):
schema_writing = "public"
name = schema_writing + ".VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
get_session(vdf._VERTICAPY_VARIABLES_["cursor"])
)
model = LinearRegression(name, cursor=vdf._VERTICAPY_VARIABLES_["cursor"])
try:
var0, var1 = model_fit([vdf_0_half, vdf_1_half], X, y, model)
except:
try:
model.set_params({"solver": "bfgs"})
var0, var1 = model_fit([vdf_0_half, vdf_1_half], X, y, model)
except:
model.drop()
raise
n, m = vdf_0_half.shape()[0], vdf_1_half.shape()[0]
F = var0 / var1
f_pvalue = f.sf(F, n, m)
result = tablesample({"index": ["F Value", "f_p_value",], "value": [F, f_pvalue],})
return result
# ---#
def het_white(
vdf: vDataFrame, eps: str, X: list,
):
"""
---------------------------------------------------------------------------
White’s Lagrange Multiplier Test for heteroscedasticity.
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
eps: str
Input residual vcolumn.
    X: list
Exogenous Variables to test the heteroscedasticity on.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[("eps", eps, [str],), ("X", X, [list],), ("vdf", vdf, [vDataFrame, str,],),],
)
columns_check([eps] + X, vdf)
eps = vdf_columns_names([eps], vdf)[0]
X = vdf_columns_names(X, vdf)
X_0 = ["1"] + X
variables = []
variables_names = []
for i in range(len(X_0)):
for j in range(i, len(X_0)):
if i != 0 or j != 0:
variables += ["{} * {} AS var_{}_{}".format(X_0[i], X_0[j], i, j)]
variables_names += ["var_{}_{}".format(i, j)]
query = "(SELECT {}, POWER({}, 2) AS VERTICAPY_TEMP_eps2 FROM {}) VERTICAPY_SUBTABLE".format(
", ".join(variables), eps, vdf.__genSQL__()
)
vdf_white = vdf_from_relation(query, cursor=vdf._VERTICAPY_VARIABLES_["cursor"])
from verticapy.learn.linear_model import LinearRegression
schema_writing = vdf._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema_writing):
schema_writing = "public"
name = schema_writing + ".VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
get_session(vdf._VERTICAPY_VARIABLES_["cursor"])
)
model = LinearRegression(name, cursor=vdf._VERTICAPY_VARIABLES_["cursor"])
try:
model.fit(vdf_white, variables_names, "VERTICAPY_TEMP_eps2")
R2 = model.score("r2")
model.drop()
except:
try:
model.set_params({"solver": "bfgs"})
model.fit(vdf_white, variables_names, "VERTICAPY_TEMP_eps2")
R2 = model.score("r2")
model.drop()
except:
model.drop()
raise
n = vdf.shape()[0]
if len(X) > 1:
k = 2 * len(X) + math.factorial(len(X)) / 2 / (math.factorial(len(X) - 2))
else:
k = 1
LM = n * R2
lm_pvalue = chi2.sf(LM, k)
F = (n - k - 1) * R2 / (1 - R2) / k
f_pvalue = f.sf(F, k, n - k - 1)
result = tablesample(
{
"index": [
"Lagrange Multiplier Statistic",
"lm_p_value",
"F Value",
"f_p_value",
],
"value": [LM, lm_pvalue, F, f_pvalue],
}
)
return result
# ---#
def jarque_bera(vdf: vDataFrame, column: str, alpha: float = 0.05):
"""
---------------------------------------------------------------------------
Jarque-Bera test (Distribution Normality).
Parameters
----------
vdf: vDataFrame
input vDataFrame.
column: str
Input vcolumn to test.
alpha: float, optional
Significance Level. Probability to accept H0.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[
("column", column, [str],),
("alpha", alpha, [int, float],),
("vdf", vdf, [vDataFrame,],),
],
)
columns_check([column], vdf)
column = vdf_columns_names([column], vdf)[0]
jb, kurtosis, skewness, n = (
vdf[column].agg(["jb", "kurtosis", "skewness", "count"]).values[column]
)
pvalue = chi2.sf(jb, 2)
result = False if pvalue < alpha else True
result = tablesample(
{
"index": [
"Jarque Bera Test Statistic",
"p_value",
"# Observations Used",
"Kurtosis - 3",
"Skewness",
"Distribution Normality",
],
"value": [jb, pvalue, n, kurtosis, skewness, result],
}
)
return result
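# Example usage (illustrative sketch; assumes a vDataFrame `vdf` with a numeric
# column "sales" -- the column name is hypothetical):
#
#     result = jarque_bera(vdf, column="sales", alpha=0.05)
#     # The "Distribution Normality" row is True when H0 (normality) cannot
#     # be rejected at the chosen significance level.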
# ---#
def kurtosistest(vdf: vDataFrame, column: str):
"""
---------------------------------------------------------------------------
Test whether the kurtosis is different from the normal distribution.
Parameters
----------
vdf: vDataFrame
input vDataFrame.
column: str
Input vcolumn to test.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types([("column", column, [str],), ("vdf", vdf, [vDataFrame,],),],)
columns_check([column], vdf)
column = vdf_columns_names([column], vdf)[0]
g2, n = vdf[column].agg(["kurtosis", "count"]).values[column]
mu1 = -6 / (n + 1)
mu2 = 24 * n * (n - 2) * (n - 3) / (((n + 1) ** 2) * (n + 3) * (n + 5))
gamma1 = (
6
* (n ** 2 - 5 * n + 2)
/ ((n + 7) * (n + 9))
* math.sqrt(6 * (n + 3) * (n + 5) / (n * (n - 2) * (n - 3)))
)
A = 6 + 8 / gamma1 * (2 / gamma1 + math.sqrt(1 + 4 / (gamma1 ** 2)))
B = (1 - 2 / A) / (1 + (g2 - mu1) / math.sqrt(mu2) * math.sqrt(2 / (A - 4)))
B = B ** (1 / 3) if B > 0 else (-B) ** (1 / 3)
Z2 = math.sqrt(9 * A / 2) * (1 - 2 / (9 * A) - B)
pvalue = 2 * norm.sf(abs(Z2))
result = tablesample({"index": ["Statistic", "p_value",], "value": [Z2, pvalue],})
return result
# ---#
def ljungbox(
vdf: vDataFrame,
column: str,
ts: str,
by: list = [],
p: int = 1,
alpha: float = 0.05,
box_pierce: bool = False,
):
"""
---------------------------------------------------------------------------
Ljung–Box test (whether any of a group of autocorrelations of a time series
are different from zero).
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
column: str
Input vcolumn to test.
ts: str
        vcolumn used as timeline. It will be used to order the data. It can be
        a numerical or date-like (date, datetime, timestamp...) vcolumn.
by: list, optional
vcolumns used in the partition.
p: int, optional
Number of lags to consider in the test.
alpha: float, optional
Significance Level. Probability to accept H0.
box_pierce: bool
If set to True, the Box-Pierce statistic will be used.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[
("ts", ts, [str],),
("column", column, [str],),
("by", by, [list],),
("p", p, [int, float],),
("alpha", alpha, [int, float],),
("box_pierce", box_pierce, [bool],),
("vdf", vdf, [vDataFrame,],),
],
)
columns_check([column] + [ts] + by, vdf)
column = vdf_columns_names([column], vdf)[0]
ts = vdf_columns_names([ts], vdf)[0]
by = vdf_columns_names(by, vdf)
acf = vdf.acf(column=column, ts=ts, by=by, p=p, show=False)
if p >= 2:
acf = acf.values["value"]
else:
acf = [acf]
n = vdf[column].count()
name = (
"Ljung–Box Test Statistic" if not (box_pierce) else "Box-Pierce Test Statistic"
)
result = tablesample(
{"index": [], name: [], "p_value": [], "Serial Correlation": []}
)
Q = 0
for k in range(p):
div = n - k - 1 if not (box_pierce) else 1
mult = n * (n + 2) if not (box_pierce) else n
Q += mult * acf[k] ** 2 / div
pvalue = chi2.sf(Q, k + 1)
result.values["index"] += [k + 1]
result.values[name] += [Q]
result.values["p_value"] += [pvalue]
result.values["Serial Correlation"] += [True if pvalue < alpha else False]
return result
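# Example usage (illustrative sketch; assumes a vDataFrame `vdf` with a numeric
# column "sales" and a date column "date" -- the names are hypothetical):
#
#     result = ljungbox(vdf, column="sales", ts="date", p=12)
#     # Row k tests the joint nullity of the first k autocorrelations;
#     # "Serial Correlation" is True when the p-value drops below alpha.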
# ---#
def mkt(vdf: vDataFrame, column: str, ts: str, alpha: float = 0.05):
"""
---------------------------------------------------------------------------
Mann Kendall test (Time Series trend).
\u26A0 Warning : This Test is computationally expensive. It is using a CROSS
JOIN during the computation. The complexity is O(n * k), n
being the total count of the vDataFrame and k the number
of rows to use to do the test.
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
column: str
Input vcolumn to test.
ts: str
        vcolumn used as timeline. It will be used to order the data. It can be
        a numerical or date-like (date, datetime, timestamp...) vcolumn.
alpha: float, optional
Significance Level. Probability to accept H0.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[
("ts", ts, [str],),
("column", column, [str],),
("alpha", alpha, [int, float],),
("vdf", vdf, [vDataFrame,],),
],
)
columns_check([column, ts], vdf)
column = vdf_columns_names([column], vdf)[0]
ts = vdf_columns_names([ts], vdf)[0]
table = "(SELECT {}, {} FROM {})".format(column, ts, vdf.__genSQL__())
query = "SELECT SUM(SIGN(y.{} - x.{})) FROM {} x CROSS JOIN {} y WHERE y.{} > x.{}".format(
column, column, table, table, ts, ts
)
vdf.__executeSQL__(query, title="Computes the Mann Kendall S.")
S = vdf._VERTICAPY_VARIABLES_["cursor"].fetchone()[0]
try:
S = float(S)
except:
S = None
n = vdf[column].count()
query = "SELECT SQRT(({} * ({} - 1) * (2 * {} + 5) - SUM(row * (row - 1) * (2 * row + 5))) / 18) FROM (SELECT row FROM (SELECT ROW_NUMBER() OVER (PARTITION BY {}) AS row FROM {}) VERTICAPY_SUBTABLE GROUP BY row) VERTICAPY_SUBTABLE".format(
n, n, n, column, vdf.__genSQL__()
)
vdf.__executeSQL__(query, title="Computes the Mann Kendall S standard deviation.")
STDS = vdf._VERTICAPY_VARIABLES_["cursor"].fetchone()[0]
try:
STDS = float(STDS)
except:
STDS = None
if STDS in (None, 0) or S == None:
return None
if S > 0:
ZMK = (S - 1) / STDS
trend = "increasing"
elif S < 0:
ZMK = (S + 1) / STDS
trend = "decreasing"
else:
ZMK = 0
trend = "no trend"
pvalue = 2 * norm.sf(abs(ZMK))
result = (
True
if (ZMK <= 0 and pvalue < alpha) or (ZMK >= 0 and pvalue < alpha)
else False
)
if not (result):
trend = "no trend"
result = tablesample(
{
"index": [
"Mann Kendall Test Statistic",
"S",
"STDS",
"p_value",
"Monotonic Trend",
"Trend",
],
"value": [ZMK, S, STDS, pvalue, result, trend],
}
)
return result
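# Example usage (illustrative sketch; assumes a vDataFrame `vdf` with a numeric
# column "sales" and a date column "date" -- the names are hypothetical):
#
#     result = mkt(vdf, column="sales", ts="date", alpha=0.05)
#     # "Trend" is reported as "increasing", "decreasing" or "no trend"
#     # depending on the sign and significance of the Mann Kendall statistic.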
# ---#
def normaltest(vdf: vDataFrame, column: str):
"""
---------------------------------------------------------------------------
Test whether a sample differs from a normal distribution.
Parameters
----------
vdf: vDataFrame
input vDataFrame.
column: str
Input vcolumn to test.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
Z1, Z2 = skewtest(vdf, column)["value"][0], kurtosistest(vdf, column)["value"][0]
Z = Z1 ** 2 + Z2 ** 2
pvalue = chi2.sf(Z, 2)
result = tablesample({"index": ["Statistic", "p_value",], "value": [Z, pvalue],})
return result
# ---#
def seasonal_decompose(
vdf: vDataFrame,
column: str,
ts: str,
by: list = [],
period: int = -1,
polynomial_order: int = 1,
estimate_seasonality: bool = True,
rule: Union[str, datetime.timedelta] = None,
mult: bool = False,
two_sided: bool = False,
):
"""
---------------------------------------------------------------------------
Performs a seasonal time series decomposition.
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
column: str
Input vcolumn to decompose.
ts: str
TS (Time Series) vcolumn to use to order the data. It can be of type date
or a numerical vcolumn.
by: list, optional
vcolumns used in the partition.
period: int, optional
Time Series period. It is used to retrieve the seasonality component.
if period <= 0, the seasonal component will be estimated using ACF. In
this case, polynomial_order must be greater than 0.
polynomial_order: int, optional
If greater than 0, the trend will be estimated using a polynomial of degree
'polynomial_order'. The parameter 'two_sided' will be ignored.
If equal to 0, the trend will be estimated using Moving Averages.
estimate_seasonality: bool, optional
If set to True, the seasonality will be estimated using cosine and sine
functions.
rule: str / time, optional
Interval to use to slice the time. For example, '5 minutes' will create records
separated by '5 minutes' time interval.
mult: bool, optional
If set to True, the decomposition type will be 'multiplicative'. Otherwise,
it is 'additive'.
two_sided: bool, optional
If set to True, a centered moving average is used for the trend isolation.
Otherwise only past values are used.
Returns
-------
vDataFrame
object containing (ts, column, TS seasonal part, TS trend, TS noise).
"""
if isinstance(by, str):
by = [by]
check_types(
[
("ts", ts, [str],),
("column", column, [str],),
("by", by, [list],),
("rule", rule, [str, datetime.timedelta,],),
("vdf", vdf, [vDataFrame,],),
("period", period, [int,],),
("mult", mult, [bool,],),
("two_sided", two_sided, [bool,],),
("polynomial_order", polynomial_order, [int,],),
("estimate_seasonality", estimate_seasonality, [bool,],),
],
)
assert period > 0 or polynomial_order > 0, ParameterError("Parameters 'polynomial_order' and 'period' can not be both null.")
columns_check([column, ts] + by, vdf)
ts, column, by = (
vdf_columns_names([ts], vdf)[0],
vdf_columns_names([column], vdf)[0],
vdf_columns_names(by, vdf),
)
if rule:
        vdf_tmp = vdf.asfreq(ts=ts, rule=rule, method={column: "linear"}, by=by)
else:
vdf_tmp = vdf[[ts, column]]
trend_name, seasonal_name, epsilon_name = (
"{}_trend".format(column[1:-1]),
"{}_seasonal".format(column[1:-1]),
"{}_epsilon".format(column[1:-1]),
)
by, by_tmp = "" if not (by) else "PARTITION BY " + ", ".join(vdf_columns_names(by, self)) + " ", by
if polynomial_order <= 0:
if two_sided:
if period == 1:
window = (-1, 1)
else:
if period % 2 == 0:
window = (-period / 2 + 1, period / 2)
else:
window = (int(-period / 2), int(period / 2))
else:
if period == 1:
window = (-2, 0)
else:
window = (-period + 1, 0)
vdf_tmp.rolling("avg", window, column, by_tmp, ts, trend_name)
else:
vdf_poly = vdf_tmp.copy()
X = []
for i in range(1, polynomial_order + 1):
vdf_poly[f"t_{i}"] = f"POWER(ROW_NUMBER() OVER ({by}ORDER BY {ts}), {i})"
X += [f"t_{i}"]
schema = vdf_poly._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema):
schema = vdf_poly._VERTICAPY_VARIABLES_["schema"]
if not (schema):
schema = "public"
from verticapy.learn.linear_model import LinearRegression
model = LinearRegression(name="{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(schema, get_session(vdf_poly._VERTICAPY_VARIABLES_["cursor"])),
cursor=vdf_poly._VERTICAPY_VARIABLES_["cursor"],
solver="bfgs",
max_iter=100,
tol=1e-6,)
model.drop()
model.fit(vdf_poly, X, column)
coefficients = model.coef_["coefficient"]
coefficients = [str(coefficients[0])] + [f"{coefficients[i]} * POWER(ROW_NUMBER() OVER({by}ORDER BY {ts}), {i})" if i != 1 else f"{coefficients[1]} * ROW_NUMBER() OVER({by}ORDER BY {ts})" for i in range(1, polynomial_order + 1)]
vdf_tmp[trend_name] = " + ".join(coefficients)
model.drop()
if mult:
vdf_tmp[seasonal_name] = f'{column} / NULLIFZERO("{trend_name}")'
else:
vdf_tmp[seasonal_name] = vdf_tmp[column] - vdf_tmp[trend_name]
if period <= 0:
acf = vdf_tmp.acf(column=seasonal_name, ts=ts, p=23, acf_type="heatmap", show=False)
period = int(acf["index"][1].split("_")[1])
if period == 1:
period = int(acf["index"][2].split("_")[1])
vdf_tmp["row_number_id"] = f"MOD(ROW_NUMBER() OVER ({by} ORDER BY {ts}), {period})"
if mult:
vdf_tmp[
seasonal_name
] = f"AVG({seasonal_name}) OVER (PARTITION BY row_number_id) / NULLIFZERO(AVG({seasonal_name}) OVER ())"
else:
vdf_tmp[
seasonal_name
] = f"AVG({seasonal_name}) OVER (PARTITION BY row_number_id) - AVG({seasonal_name}) OVER ()"
if estimate_seasonality:
vdf_seasonality = vdf_tmp.copy()
vdf_seasonality["t_cos"] = f"COS(2 * PI() * ROW_NUMBER() OVER ({by}ORDER BY {ts}) / {period})"
vdf_seasonality["t_sin"] = f"SIN(2 * PI() * ROW_NUMBER() OVER ({by}ORDER BY {ts}) / {period})"
X = ["t_cos", "t_sin",]
schema = vdf_seasonality._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema):
schema = vdf_seasonality._VERTICAPY_VARIABLES_["schema"]
if not (schema):
schema = "public"
from verticapy.learn.linear_model import LinearRegression
model = LinearRegression(name="{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(schema, get_session(vdf_seasonality._VERTICAPY_VARIABLES_["cursor"])),
cursor=vdf_seasonality._VERTICAPY_VARIABLES_["cursor"],
solver="bfgs",
max_iter=100,
tol=1e-6,)
model.drop()
model.fit(vdf_seasonality, X, seasonal_name)
coefficients = model.coef_["coefficient"]
vdf_tmp[seasonal_name] = f"{coefficients[0]} + {coefficients[1]} * COS(2 * PI() * ROW_NUMBER() OVER ({by}ORDER BY {ts}) / {period}) + {coefficients[2]} * SIN(2 * PI() * ROW_NUMBER() OVER ({by}ORDER BY {ts}) / {period})"
model.drop()
if mult:
vdf_tmp[
epsilon_name
] = f'{column} / NULLIFZERO("{trend_name}") / NULLIFZERO("{seasonal_name}")'
else:
vdf_tmp[epsilon_name] = (
vdf_tmp[column] - vdf_tmp[trend_name] - vdf_tmp[seasonal_name]
)
vdf_tmp["row_number_id"].drop()
return vdf_tmp
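# Example usage (illustrative sketch; assumes a vDataFrame `vdf` with a numeric
# column "sales" and a date column "date" -- the names are hypothetical):
#
#     decomposed = seasonal_decompose(vdf, column="sales", ts="date",
#                                     period=12, polynomial_order=2)
#     # The returned vDataFrame carries "sales_trend", "sales_seasonal" and
#     # "sales_epsilon" columns next to the original data.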
# ---#
def skewtest(vdf: vDataFrame, column: str):
"""
---------------------------------------------------------------------------
Test whether the skewness is different from the normal distribution.
Parameters
----------
vdf: vDataFrame
input vDataFrame.
column: str
Input vcolumn to test.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types([("column", column, [str],), ("vdf", vdf, [vDataFrame,],),],)
columns_check([column], vdf)
column = vdf_columns_names([column], vdf)[0]
g1, n = vdf[column].agg(["skewness", "count"]).values[column]
mu1 = 0
mu2 = 6 * (n - 2) / ((n + 1) * (n + 3))
gamma1 = 0
gamma2 = (
36 * (n - 7) * (n ** 2 + 2 * n - 5) / ((n - 2) * (n + 5) * (n + 7) * (n + 9))
)
W2 = math.sqrt(2 * gamma2 + 4) - 1
delta = 1 / math.sqrt(math.log(math.sqrt(W2)))
alpha2 = 2 / (W2 - 1)
Z1 = delta * math.asinh(g1 / math.sqrt(alpha2 * mu2))
pvalue = 2 * norm.sf(abs(Z1))
result = tablesample({"index": ["Statistic", "p_value",], "value": [Z1, pvalue],})
return result
# ---#
def variance_inflation_factor(
vdf: vDataFrame, X: list, X_idx: int = None,
):
"""
---------------------------------------------------------------------------
Computes the variance inflation factor (VIF). It can be used to detect
multicollinearity in an OLS Regression Analysis.
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
X: list
Input Variables.
X_idx: int
Index of the exogenous variable in X. If left to None, a tablesample will
be returned with all the variables VIF.
Returns
-------
float
VIF.
"""
check_types(
[
("X_idx", X_idx, [int],),
("X", X, [list],),
("vdf", vdf, [vDataFrame, str,],),
],
)
columns_check(X, vdf)
X = vdf_columns_names(X, vdf)
if isinstance(X_idx, str):
columns_check([X_idx], vdf)
for i in range(len(X)):
if str_column(X[i]) == str_column(X_idx):
X_idx = i
break
if isinstance(X_idx, (int, float)):
X_r = []
for i in range(len(X)):
if i != X_idx:
X_r += [X[i]]
y_r = X[X_idx]
from verticapy.learn.linear_model import LinearRegression
schema_writing = vdf._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema_writing):
schema_writing = "public"
name = schema_writing + ".VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
get_session(vdf._VERTICAPY_VARIABLES_["cursor"])
)
model = LinearRegression(name, cursor=vdf._VERTICAPY_VARIABLES_["cursor"])
try:
model.fit(vdf, X_r, y_r)
R2 = model.score("r2")
model.drop()
except:
try:
model.set_params({"solver": "bfgs"})
model.fit(vdf, X_r, y_r)
R2 = model.score("r2")
model.drop()
except:
model.drop()
raise
if 1 - R2 != 0:
return 1 / (1 - R2)
else:
return np.inf
elif X_idx == None:
VIF = []
for i in range(len(X)):
VIF += [variance_inflation_factor(vdf, X, i)]
return tablesample({"X_idx": X, "VIF": VIF})
else:
raise ParameterError(
f"Wrong type for Parameter X_idx.\nExpected integer, found {type(X_idx)}."
)
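# Example usage (illustrative sketch; assumes a vDataFrame `vdf` with predictor
# columns "x1", "x2" and "x3" -- the names are hypothetical):
#
#     vif_table = variance_inflation_factor(vdf, X=["x1", "x2", "x3"])
#     # A VIF far above 10 for a column is a common rule of thumb for
#     # problematic multicollinearity.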
|
the-stack_0_13244 | import argparse
import collections
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageEnhance
import albumentations as A
import albumentations.pytorch
from tqdm.notebook import tqdm
import cv2
import re
import time
from retinanet import model
# from retinanet import retina
from retinanet.dataloader import *
from retinanet.anchors import Anchors
# from scheduler import *
#Torch
import torch
import torch.nn as nn
from torch.utils.data import Dataset,DataLoader
from torch.utils.data.sampler import SequentialSampler, RandomSampler
from torch.optim import Adam, lr_scheduler
import torch.optim as optim
from pycocotools.cocoeval import COCOeval
import json
import torch
def main(args=None):
parser = argparse.ArgumentParser(description='Simple paps training script for training a RetinaNet network.')
    parser.add_argument('--batch_size', help='Number of batches', type=int, default=0)
parser.add_argument('--test_data', help='test data file', default='data/test.npy')
parser.add_argument('--model_dir', help='pretrained model dir', default='trained_models/resnet50_640/model.pt')
    parser.add_argument('--threshold', help='score threshold for keeping detections', type=float, default=0.1)
parser = parser.parse_args(args)
    GPU_NUM = 0 # desired GPU number
device = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')
torch.cuda.set_device(device) # change allocation of current GPU
print('device', device)
retinanet = model.resnet50(num_classes=2, device=device)
retinanet = torch.nn.DataParallel(retinanet, device_ids = [GPU_NUM], output_device=GPU_NUM).to(device)
retinanet.load_state_dict(torch.load(parser.model_dir))
# retinanet.to(device)
dataset_val = PapsDataset('data/', set_name='val_2class',
transform=val_transforms)
val_data_loader = DataLoader(
dataset_val,
batch_size=1,
shuffle=False,
num_workers=4,
collate_fn=collate_fn
)
retinanet.eval()
start_time = time.time()
threshold = parser.threshold
results = []
GT_results = []
image_ids = []
cnt = 0
for index, data in enumerate(tqdm(val_data_loader)) :
if cnt > 100 :
break
cnt += 1
with torch.no_grad():
images, tbox, tlabel, targets = data
batch_size = len(images)
# print(tbox)
# print(len(tbox[0]))
c, h, w = images[0].shape
images = torch.cat(images).view(-1, c, h, w).to(device)
outputs = retinanet(images)
scores, labels, boxes = (outputs)
scores = scores.cpu()
labels = labels.cpu()
boxes = boxes.cpu()
if boxes.shape[0] > 0:
# change to (x, y, w, h) (MS COCO standard)
boxes[:, 2] -= boxes[:, 0]
boxes[:, 3] -= boxes[:, 1]
# print(boxes)
# compute predicted labels and scores
#for box, score, label in zip(boxes[0], scores[0], labels[0]):
for box_id in range(boxes.shape[0]):
score = float(scores[box_id])
label = int(labels[box_id])
box = boxes[box_id, :]
# scores are sorted, so we can break
if score < threshold:
break
# append detection for each positively labeled class
image_result = {
'image_id' : dataset_val.image_ids[index],
'category_id' : dataset_val.label_to_coco_label(label),
'score' : float(score),
'bbox' : box.tolist(),
}
# append detection to results
results.append(image_result)
if len(tbox[0]) > 0:
# compute predicted labels and scores
#for box, score, label in zip(boxes[0], scores[0], labels[0]):
for box_id in range(len(tbox[0])):
score = float(0.99)
label = (tlabel[0][box_id])
box = list(tbox[0][box_id])
box[2] -= box[0]
box[3] -= box[1]
# append detection for each positively labeled class
image_result = {
'image_id' : dataset_val.image_ids[index],
'category_id' : dataset_val.label_to_coco_label(label),
'score' : float(score),
'bbox' : list(box),
}
# append detection to results
GT_results.append(image_result)
# append image to list of processed images
image_ids.append(dataset_val.image_ids[index])
# print progress
print('{}/{}'.format(index, len(dataset_val)), end='\r')
if not len(results):
print('No object detected')
print('GT_results', len(GT_results))
print('pred_results', len(results))
# write output
json.dump(results, open('trained_models/eval/{}_bbox_results.json'.format(dataset_val.set_name), 'w'), indent=4)
# write GT
json.dump(GT_results, open('trained_models/eval/{}_GTbbox_results.json'.format(dataset_val.set_name), 'w'), indent=4)
print('validation time :', time.time() - start_time)
# load results in COCO evaluation tool
coco_true = dataset_val.coco
coco_pred = coco_true.loadRes('trained_models/eval/{}_bbox_results.json'.format(dataset_val.set_name))
coco_gt = coco_true.loadRes('trained_models/eval/{}_GTbbox_results.json'.format(dataset_val.set_name))
# run COCO evaluation
# coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
coco_eval = COCOeval(coco_gt, coco_pred, 'bbox')
coco_eval.params.imgIds = image_ids
# coco_eval.params.catIds = [0]
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
if __name__ == '__main__':
main()
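# Example invocation (illustrative sketch; the script name, model path and
# threshold below are hypothetical):
#
#     python eval_retinanet.py --model_dir trained_models/resnet50_640/model.pt \
#         --threshold 0.3
#     # Writes {set_name}_bbox_results.json and {set_name}_GTbbox_results.json
#     # under trained_models/eval/ and prints the COCO mAP summary.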
|
the-stack_0_13245 | # Copyright 2019 Thai-Son Nguyen
# Licensed under the Apache License, Version 2.0 (the "License")
import random
import struct
import os
import numpy as np
import torch
from torch.nn.utils.rnn import pack_sequence
from . import smart_open
class ScpStreamReader(object):
def __init__(self, scp_path, label_path=None, time_idx_path=None, sek=True, downsample=1,
sort_src=False, pack_src=False, max_len=10000, max_utt=4096,
mean_sub=False, zero_pad=0,
spec_drop=False, spec_bar=2, time_stretch=False, time_win=10000,
sub_seq=0.25, ss_static=False, shuffle=False, fp16=False):
self.scp_path = scp_path # path to the .scp file
self.label_path = label_path # path to the label file
self.time_idx_path = time_idx_path
self.downsample = downsample
self.shuffle = shuffle
self.sort_src = sort_src
self.pack_src = pack_src
self.max_len = max_len
self.sek = sek
self.mean_sub = mean_sub
self.zero_pad = zero_pad
self.spec_drop = spec_drop
self.spec_bar = spec_bar
self.time_stretch = time_stretch
self.time_win = time_win
self.ts_scale = 1.
self.sub_seq = sub_seq
self.ss_static = ss_static
self.fp16 = fp16
self.scp_dir = ''
self.scp_file = None
self.scp_pos = 0
self.max_utt = max_utt
# store features for each data partition
self.feat = []
self.label = []
# store all label in a dictionary
self.label_dic = None
self.time_idx = None
self.end_reading = False
    # read all reference labels into a dictionary keyed by utterance id
def read_all_label(self):
self.label_dic = {}
label_file = smart_open(self.label_path, 'r')
for line in label_file:
tokens = line.split()
utt_id = tokens[0]
if utt_id == '' or utt_id is None: continue
self.label_dic[utt_id] = [int(token) for token in tokens[1:]]
def read_time_index(self):
self.time_idx = {}
idx_file = smart_open(self.time_idx_path, 'r')
for line in idx_file:
tokens = line.split()
utt_id = tokens[0]
tokens = [int(token) for token in tokens[1:]]
l = len(tokens) // 2
j = 0
sid, eid = [], []
for i in range(len(self.label_dic[utt_id])):
if j < l-1 and i >= tokens[j+1]: j += 1
#eid.append(10000 if j == l-1 else (tokens[l+j+1]//4+1))
eid.append(10000 if j == l-1 else (tokens[l+j+1]//4+8))
sid.append(tokens[l+j]//4)
self.time_idx[utt_id] = (sid, eid)
def _read_string(self, ark_file):
s = ''
while True:
c = ark_file.read(1).decode('utf-8')
if c == ' ' or c == '': return s
s += c
def _read_integer(self, ark_file):
n = ord(ark_file.read(1))
return struct.unpack('>i', ark_file.read(n)[::-1])[0]
def initialize(self):
if self.scp_file is None:
self.scp_file = [line.rstrip('\n') for line in smart_open(self.scp_path, 'r')]
path = os.path.dirname(self.scp_path)
self.scp_dir = path + '/' if path != '' else None
self.scp_pos = 0
if self.shuffle: random.shuffle(self.scp_file)
if self.label_path is not None and self.label_dic is None:
self.read_all_label()
print("Loaded labels of %d utterances" % len(self.label_dic))
if self.time_idx_path is not None and self.time_idx is None:
self.read_time_index()
self.utt_index = 0
if self.max_utt < 0 and len(self.feat) > 0:
self.utt_count = len(self.feat)
else:
self.utt_count = 0
self.end_reading = False
self.ts_scale = 1.
# read the feature matrix of the next utterance
def read_next_utt(self):
if self.scp_pos >= len(self.scp_file):
return '', None
line = self.scp_file[self.scp_pos]
utt_id, path_pos = line.replace('\n','').split(' ')
path, pos = path_pos.split(':')
if not path.startswith('/') and self.scp_dir is not None:
path = self.scp_dir + path
self.scp_pos += 1
ark_file = smart_open(path, 'rb')
ark_file.seek(int(pos))
header = ark_file.read(2).decode('utf-8')
if header != "\0B":
print("Input .ark file is not binary"); exit(1)
format = self._read_string(ark_file)
if format == "FM":
rows = self._read_integer(ark_file)
cols = self._read_integer(ark_file)
#print rows, cols
utt_mat = struct.unpack("<%df" % (rows * cols), ark_file.read(rows*cols*4))
utt_mat = np.array(utt_mat, dtype="float32")
if self.fp16:
utt_mat = utt_mat.astype("float16")
if self.zero_pad > 0:
rows += self.zero_pad
utt_mat.resize(rows*cols)
utt_mat = np.reshape(utt_mat, (rows, cols))
else:
print("Unsupported .ark file with %s format" % format); exit(1)
ark_file.close()
return utt_id, utt_mat
def read_batch_utt(self, batch_size=32):
feats = []
ids = []
i = 0
while i < batch_size:
utt_id, utt_mat = self.read_next_utt()
if utt_id is None or utt_id == '': break
feats.append(utt_mat)
ids.append(utt_id)
i += 1
if len(feats) == 0: return ([], [], [])
lst = sorted(zip(feats, ids), key=lambda e : -e[0].shape[0])
src, ids = zip(*lst)
src = self.augment_src(src)
src = self.collate_src(src)
return (*src, ids)
def read_utt_label(self, utt_id, utt_mat):
if not utt_id in self.label_dic:
#print('Labels not found for %s' % utt_id)
return utt_mat, None
if len(utt_mat) >= self.max_len:
return utt_mat, None
utt_lbl = self.label_dic[utt_id]
if self.sek and utt_lbl is not None:
utt_lbl = [1] + [el+2 for el in utt_lbl] + [2]
if self.time_idx is None:
tid = (None, None)
else:
tid = self.time_idx[utt_id]
utt_lbl = (utt_lbl, tid)
return utt_mat, utt_lbl
def next_partition(self):
if self.end_reading:
return 0
self.feat = []
self.label = []
while self.max_utt < 0 or len(self.feat) < self.max_utt:
utt_id, utt_mat = self.read_next_utt()
if utt_id == '': # No more utterances available
self.end_reading = True
break
utt_mat, utt_lbl = self.read_utt_label(utt_id, utt_mat)
if utt_lbl is None: continue
self.feat.append(utt_mat)
self.label.append(utt_lbl)
return len(self.feat)
def available(self):
if self.utt_index >= self.utt_count:
self.utt_count = self.next_partition()
self.utt_index = 0
return self.utt_index < self.utt_count
def timefreq_drop_inst(self, inst, num=2, time_drop=0.25, freq_drop=0.25):
time_num, freq_num = inst.shape
freq_num = freq_num
time_len = 72
max_time = int(time_drop*time_num)
for i in range(num):
n = min(max_time, random.randint(0, time_len))
t0 = random.randint(0, time_num-n)
inst[t0:t0+n, :] = 0
max_time -= n
n = random.randint(0, int(freq_drop*freq_num))
f0 = random.randint(0, freq_num-n)
inst[:, f0:f0+n] = 0
return inst
def time_stretch_inst(self, inst, low=0.8, high=1.25, win=10000):
time_len = inst.shape[0]
ids = None
for i in range((time_len // win) + 1):
s = random.uniform(low, high)
e = min(time_len, win*(i+1))
r = np.arange(win*i, e-1, s, dtype=np.float32)
r = np.round(r).astype(np.int32)
ids = r if ids is None else np.concatenate((ids, r))
self.ts_scale = s
return inst[ids]
def mean_sub_inst(self, inst):
return inst - inst.mean(axis=0, keepdims=True)
def down_sample_inst(self, feature, cf=4):
feature = feature[:(feature.shape[0]//cf)*cf,:]
return feature.reshape(feature.shape[0]//cf, feature.shape[1]*cf)
def augment_src(self, src):
insts = []
for inst in src:
inst = self.time_stretch_inst(inst, win=self.time_win) if self.time_stretch else inst
inst = self.mean_sub_inst(inst) if self.mean_sub else inst
inst = self.timefreq_drop_inst(inst, num=self.spec_bar) if self.spec_drop else inst
inst = self.down_sample_inst(inst, self.downsample) if self.downsample > 1 else inst
insts.append(inst)
return insts
def collate_src(self, insts):
max_len = max(inst.shape[0] for inst in insts)
inputs = np.zeros((len(insts), max_len, insts[0].shape[1]))
masks = torch.zeros((len(insts), max_len), dtype=torch.uint8)
for idx, inst in enumerate(insts):
inputs[idx, :inst.shape[0], :] = inst
masks[idx, :inst.shape[0]] = 1
inputs = torch.HalfTensor(inputs) if self.fp16 else torch.FloatTensor(inputs)
return inputs, masks
def collate_src_pack(self, insts):
max_len = max(inst.shape[0] for inst in insts)
masks = torch.zeros((len(insts), max_len), dtype=torch.uint8)
inputs = []
for idx, inst in enumerate(insts):
inputs.append(torch.HalfTensor(inst) if self.fp16 else torch.FloatTensor(inst))
masks[idx, 0:inst.shape[0]] = 1
inputs = pack_sequence(inputs)
return inputs, masks
def collate_tgt(self, insts):
tgt, tid = zip(*insts)
max_len = max(len(inst) for inst in tgt)
labels = np.array([inst + [0] * (max_len - len(inst)) for inst in tgt])
labels = torch.LongTensor(labels)
max_len -= 1
sid, eid = zip(*tid)
if None not in sid:
sid = np.array([inst + [0] * (max_len - len(inst)) for inst in sid])
sid = torch.LongTensor(sid * self.ts_scale + 0.5)
if None not in eid:
eid = np.array([inst + [10000] * (max_len - len(inst)) for inst in eid])
eid = torch.LongTensor(eid * self.ts_scale + 0.5)
return labels, sid, eid
def next_batch(self, batch_size=16):
src = self.feat[self.utt_index:self.utt_index+batch_size]
tgt = self.label[self.utt_index:self.utt_index+batch_size]
src = self.augment_src(src)
if self.sort_src or self.pack_src:
lst = sorted(zip(src, tgt), key=lambda e : -e[0].shape[0])
src, tgt = zip(*lst)
self.utt_index += len(src)
src = self.collate_src(src) if not self.pack_src else self.collate_src_pack(src)
tgt = self.collate_tgt(tgt)
return (*src, *tgt)
def next(self, batch_input=3000):
l = len(self.feat)
j = i = self.utt_index
max_l = 0
while j < l:
max_l = max(max_l, self.feat[j].shape[0])
if j > i and max_l*(j-i+1) > batch_input: break
j += 1
last = (j==l)
src, tgt = self.feat[self.utt_index:j], self.label[self.utt_index:j]
src = self.augment_src(src)
if self.sort_src or self.pack_src:
lst = sorted(zip(src, tgt), key=lambda e : -e[0].shape[0])
src, tgt = zip(*lst)
seqs = len(src)
self.utt_index += seqs
src = self.collate_src(src) if not self.pack_src else self.collate_src_pack(src)
tgt = self.collate_tgt(tgt)
return (*src, *tgt, seqs, last)
class ScpBatchReader(ScpStreamReader):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.shuffle = False
def next_partition(self):
if self.end_reading:
return 0
self.feat, self.label = [], []
feats, labels = [], []
num = 0
while num < self.max_utt:
if self.scp_pos >= len(self.scp_file):
self.end_reading = True; break
if self.scp_file[self.scp_pos].startswith('#'):
if len(feats) > 0:
self.feat.append(feats)
self.label.append(labels)
num += len(feats)
feats, labels = [], []
self.scp_pos += 1
continue
utt_id, utt_mat = self.read_next_utt()
if utt_id == '':
self.end_reading = True; break
utt_mat, utt_lbl = self.read_utt_label(utt_id, utt_mat)
if utt_lbl is None: continue
feats.append(utt_mat)
labels.append(utt_lbl)
return len(self.feat)
def next(self, batch_input=1):
src, tgt = self.feat[self.utt_index], self.label[self.utt_index]
src = self.augment_src(src)
if self.sort_src or self.pack_src:
lst = sorted(zip(src, tgt), key=lambda e : -e[0].shape[0])
src, tgt = zip(*lst)
seqs = len(src)
self.utt_index += 1
last = (self.utt_index == len(self.feat))
src = self.collate_src(src) if not self.pack_src else self.collate_src_pack(src)
tgt = self.collate_tgt(tgt)
return (*src, *tgt, seqs, last)
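# --- Illustrative sketch (not part of the original reader) ---
# Exercises the padding scheme used by collate_src() above on toy
# "utterances" of different lengths: zero-pad every instance to the longest
# one and mark the real frames in a 0/1 mask.
if __name__ == '__main__':
    import numpy as np
    import torch
    toy = [np.random.randn(n, 40).astype(np.float32) for n in (7, 5, 9)]
    batch = torch.zeros(len(toy), max(t.shape[0] for t in toy), 40)
    mask = torch.zeros(batch.shape[0], batch.shape[1], dtype=torch.uint8)
    for i, t in enumerate(toy):
        batch[i, :t.shape[0]] = torch.from_numpy(t)
        mask[i, :t.shape[0]] = 1
    print(batch.shape, mask.sum(dim=1))  # torch.Size([3, 9, 40]), frame counts 7, 5, 9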
|
the-stack_0_13246 | from importlib import import_module
from os import path, listdir
from debug import logger
import paths
class MsgBase(object):
def encode(self):
        self.data = {"": type(self).__name__.lower()}
def constructObject(data):
whitelist = ["message"]
if data[""] not in whitelist:
return None
try:
m = import_module("messagetypes." + data[""])
classBase = getattr(m, data[""].title())
except (NameError, ImportError):
logger.error("Don't know how to handle message type: \"%s\"", data[""], exc_info=True)
return None
try:
returnObj = classBase()
returnObj.decode(data)
except KeyError as e:
logger.error("Missing mandatory key %s", e)
return None
except:
logger.error("classBase fail", exc_info=True)
return None
else:
return returnObj
if paths.frozen is not None:
import messagetypes.message
import messagetypes.vote
else:
for mod in listdir(path.dirname(__file__)):
if mod == "__init__.py":
continue
splitted = path.splitext(mod)
if splitted[1] != ".py":
continue
try:
import_module("." + splitted[0], "messagetypes")
except ImportError:
logger.error("Error importing %s", mod, exc_info=True)
else:
logger.debug("Imported message type module %s", mod)
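# --- Illustrative sketch (simplified stand-in, not the real message classes) ---
# constructObject() dispatches on the "" key of a decoded dict: the value must
# be whitelisted, the matching messagetypes.<name> module is imported, its
# <Name> class is instantiated, and decode() consumes the remaining fields.
# The same round trip with a local stand-in class ("body" is an assumed field):
if __name__ == '__main__':
    class DemoMessage(object):
        def decode(self, data):
            self.body = data.get("body", "")

    payload = {"": "message", "body": "hello"}
    obj = {"message": DemoMessage}[payload[""]]()  # mirrors whitelist + lookup
    obj.decode(payload)
    print(obj.body)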
|
the-stack_0_13247 | from ..Qt import QtGui, QtCore, isQObjectAlive
from ..GraphicsScene import GraphicsScene
from ..Point import Point
from .. import functions as fn
import weakref
import operator
import warnings
from functools import reduce
from ..util.lru_cache import LRUCache
class GraphicsItem(object):
"""
**Bases:** :class:`object`
Abstract class providing useful methods to GraphicsObject and GraphicsWidget.
(This is required because we cannot have multiple inheritance with QObject subclasses.)
A note about Qt's GraphicsView framework:
The GraphicsView system places a lot of emphasis on the notion that the graphics within the scene should be device independent--you should be able to take the same graphics and display them on screens of different resolutions, printers, export to SVG, etc. This is nice in principle, but causes me a lot of headache in practice. It means that I have to circumvent all the device-independent expectations any time I want to operate in pixel coordinates rather than arbitrary scene coordinates. A lot of the code in GraphicsItem is devoted to this task--keeping track of view widgets and device transforms, computing the size and shape of a pixel in local item coordinates, etc. Note that in item coordinates, a pixel does not have to be square or even rectangular, so just asking how to increase a bounding rect by 2px can be a rather complex task.
"""
_pixelVectorGlobalCache = LRUCache(100, 70)
_mapRectFromViewGlobalCache = LRUCache(100, 70)
def __init__(self, register=None):
if not hasattr(self, '_qtBaseClass'):
for b in self.__class__.__bases__:
if issubclass(b, QtGui.QGraphicsItem):
self.__class__._qtBaseClass = b
break
if not hasattr(self, '_qtBaseClass'):
raise Exception('Could not determine Qt base class for GraphicsItem: %s' % str(self))
self._pixelVectorCache = [None, None]
self._viewWidget = None
self._viewBox = None
self._connectedView = None
self._exportOpts = False ## If False, not currently exporting. Otherwise, contains dict of export options.
if register is not None and register:
warnings.warn(
"'register' argument is deprecated and does nothing",
DeprecationWarning, stacklevel=2
)
def getViewWidget(self):
"""
Return the view widget for this item.
If the scene has multiple views, only the first view is returned.
The return value is cached; clear the cached value with forgetViewWidget().
If the view has been deleted by Qt, return None.
"""
if self._viewWidget is None:
scene = self.scene()
if scene is None:
return None
views = scene.views()
if len(views) < 1:
return None
self._viewWidget = weakref.ref(self.scene().views()[0])
v = self._viewWidget()
if v is not None and not isQObjectAlive(v):
return None
return v
def forgetViewWidget(self):
self._viewWidget = None
def getViewBox(self):
"""
Return the first ViewBox or GraphicsView which bounds this item's visible space.
If this item is not contained within a ViewBox, then the GraphicsView is returned.
If the item is contained inside nested ViewBoxes, then the inner-most ViewBox is returned.
The result is cached; clear the cache with forgetViewBox()
"""
if self._viewBox is None:
p = self
while True:
try:
p = p.parentItem()
except RuntimeError: ## sometimes happens as items are being removed from a scene and collected.
return None
if p is None:
vb = self.getViewWidget()
if vb is None:
return None
else:
self._viewBox = weakref.ref(vb)
break
if hasattr(p, 'implements') and p.implements('ViewBox'):
self._viewBox = weakref.ref(p)
break
return self._viewBox() ## If we made it this far, _viewBox is definitely not None
def forgetViewBox(self):
self._viewBox = None
def deviceTransform(self, viewportTransform=None):
"""
Return the transform that converts local item coordinates to device coordinates (usually pixels).
Extends deviceTransform to automatically determine the viewportTransform.
"""
if self._exportOpts is not False and 'painter' in self._exportOpts: ## currently exporting; device transform may be different.
scaler = self._exportOpts.get('resolutionScale', 1.0)
return self.sceneTransform() * QtGui.QTransform(scaler, 0, 0, scaler, 1, 1)
if viewportTransform is None:
view = self.getViewWidget()
if view is None:
return None
viewportTransform = view.viewportTransform()
dt = self._qtBaseClass.deviceTransform(self, viewportTransform)
#xmag = abs(dt.m11())+abs(dt.m12())
#ymag = abs(dt.m21())+abs(dt.m22())
#if xmag * ymag == 0:
if dt.determinant() == 0: ## occurs when deviceTransform is invalid because widget has not been displayed
return None
else:
return dt
def viewTransform(self):
"""Return the transform that maps from local coordinates to the item's ViewBox coordinates
If there is no ViewBox, return the scene transform.
Returns None if the item does not have a view."""
view = self.getViewBox()
if view is None:
return None
if hasattr(view, 'implements') and view.implements('ViewBox'):
tr = self.itemTransform(view.innerSceneItem())
if isinstance(tr, tuple):
tr = tr[0] ## difference between pyside and pyqt
return tr
else:
return self.sceneTransform()
#return self.deviceTransform(view.viewportTransform())
def getBoundingParents(self):
"""Return a list of parents to this item that have child clipping enabled."""
p = self
parents = []
while True:
p = p.parentItem()
if p is None:
break
if p.flags() & self.ItemClipsChildrenToShape:
parents.append(p)
return parents
def viewRect(self):
"""Return the visible bounds of this item's ViewBox or GraphicsWidget,
in the local coordinate system of the item."""
view = self.getViewBox()
if view is None:
return None
bounds = self.mapRectFromView(view.viewRect())
if bounds is None:
return None
bounds = bounds.normalized()
## nah.
#for p in self.getBoundingParents():
#bounds &= self.mapRectFromScene(p.sceneBoundingRect())
return bounds
def pixelVectors(self, direction=None):
"""Return vectors in local coordinates representing the width and height of a view pixel.
If direction is specified, then return vectors parallel and orthogonal to it.
Return (None, None) if pixel size is not yet defined (usually because the item has not yet been displayed)
or if pixel size is below floating-point precision limit.
"""
## This is an expensive function that gets called very frequently.
## We have two levels of cache to try speeding things up.
dt = self.deviceTransform()
if dt is None:
return None, None
## Ignore translation. If the translation is much larger than the scale
## (such as when looking at unix timestamps), we can get floating-point errors.
dt.setMatrix(dt.m11(), dt.m12(), 0, dt.m21(), dt.m22(), 0, 0, 0, 1)
if direction is None:
direction = QtCore.QPointF(1, 0)
elif direction.manhattanLength() == 0:
raise Exception("Cannot compute pixel length for 0-length vector.")
key = (dt.m11(), dt.m21(), dt.m12(), dt.m22(), direction.x(), direction.y())
## check local cache
if key == self._pixelVectorCache[0]:
return tuple(map(Point, self._pixelVectorCache[1])) ## return a *copy*
## check global cache
pv = self._pixelVectorGlobalCache.get(key, None)
if pv is not None:
self._pixelVectorCache = [key, pv]
return tuple(map(Point,pv)) ## return a *copy*
## attempt to re-scale direction vector to fit within the precision of the coordinate system
## Here's the problem: we need to map the vector 'direction' from the item to the device, via transform 'dt'.
## In some extreme cases, this mapping can fail unless the length of 'direction' is cleverly chosen.
## Example:
## dt = [ 1, 0, 2
## 0, 2, 1e20
## 0, 0, 1 ]
## Then we map the origin (0,0) and direction (0,1) and get:
## o' = 2,1e20
## d' = 2,1e20 <-- should be 1e20+2, but this can't be represented with a 32-bit float
##
## |o' - d'| == 0 <-- this is the problem.
## Perhaps the easiest solution is to exclude the transformation column from dt. Does this cause any other problems?
#if direction.x() == 0:
#r = abs(dt.m32())/(abs(dt.m12()) + abs(dt.m22()))
##r = 1.0/(abs(dt.m12()) + abs(dt.m22()))
#elif direction.y() == 0:
#r = abs(dt.m31())/(abs(dt.m11()) + abs(dt.m21()))
##r = 1.0/(abs(dt.m11()) + abs(dt.m21()))
#else:
#r = ((abs(dt.m32())/(abs(dt.m12()) + abs(dt.m22()))) * (abs(dt.m31())/(abs(dt.m11()) + abs(dt.m21()))))**0.5
#if r == 0:
#r = 1. ## shouldn't need to do this; probably means the math above is wrong?
#directionr = direction * r
directionr = direction
## map direction vector onto device
#viewDir = Point(dt.map(directionr) - dt.map(Point(0,0)))
#mdirection = dt.map(directionr)
dirLine = QtCore.QLineF(QtCore.QPointF(0,0), directionr)
viewDir = dt.map(dirLine)
if viewDir.length() == 0:
return None, None ## pixel size cannot be represented on this scale
## get unit vector and orthogonal vector (length of pixel)
#orthoDir = Point(viewDir[1], -viewDir[0]) ## orthogonal to line in pixel-space
try:
normView = viewDir.unitVector()
#normView = viewDir.norm() ## direction of one pixel orthogonal to line
normOrtho = normView.normalVector()
#normOrtho = orthoDir.norm()
except:
raise Exception("Invalid direction %s" %directionr)
## map back to item
dti = fn.invertQTransform(dt)
#pv = Point(dti.map(normView)-dti.map(Point(0,0))), Point(dti.map(normOrtho)-dti.map(Point(0,0)))
pv = Point(dti.map(normView).p2()), Point(dti.map(normOrtho).p2())
self._pixelVectorCache[1] = pv
self._pixelVectorCache[0] = dt
self._pixelVectorGlobalCache[key] = pv
return self._pixelVectorCache[1]
def pixelLength(self, direction, ortho=False):
"""Return the length of one pixel in the direction indicated (in local coordinates)
If ortho=True, then return the length of one pixel orthogonal to the direction indicated.
Return None if pixel size is not yet defined (usually because the item has not yet been displayed).
"""
normV, orthoV = self.pixelVectors(direction)
        if normV is None or orthoV is None:
return None
if ortho:
return orthoV.length()
return normV.length()
def pixelSize(self):
## deprecated
v = self.pixelVectors()
if v == (None, None):
return None, None
return (v[0].x()**2+v[0].y()**2)**0.5, (v[1].x()**2+v[1].y()**2)**0.5
def pixelWidth(self):
## deprecated
vt = self.deviceTransform()
if vt is None:
return 0
vt = fn.invertQTransform(vt)
return vt.map(QtCore.QLineF(0, 0, 1, 0)).length()
def pixelHeight(self):
## deprecated
vt = self.deviceTransform()
if vt is None:
return 0
vt = fn.invertQTransform(vt)
return vt.map(QtCore.QLineF(0, 0, 0, 1)).length()
#return Point(vt.map(QtCore.QPointF(0, 1))-vt.map(QtCore.QPointF(0, 0))).length()
def mapToDevice(self, obj):
"""
Return *obj* mapped from local coordinates to device coordinates (pixels).
If there is no device mapping available, return None.
"""
vt = self.deviceTransform()
if vt is None:
return None
return vt.map(obj)
def mapFromDevice(self, obj):
"""
Return *obj* mapped from device coordinates (pixels) to local coordinates.
If there is no device mapping available, return None.
"""
vt = self.deviceTransform()
if vt is None:
return None
if isinstance(obj, QtCore.QPoint):
obj = QtCore.QPointF(obj)
vt = fn.invertQTransform(vt)
return vt.map(obj)
def mapRectToDevice(self, rect):
"""
Return *rect* mapped from local coordinates to device coordinates (pixels).
If there is no device mapping available, return None.
"""
vt = self.deviceTransform()
if vt is None:
return None
return vt.mapRect(rect)
def mapRectFromDevice(self, rect):
"""
Return *rect* mapped from device coordinates (pixels) to local coordinates.
If there is no device mapping available, return None.
"""
vt = self.deviceTransform()
if vt is None:
return None
vt = fn.invertQTransform(vt)
return vt.mapRect(rect)
def mapToView(self, obj):
vt = self.viewTransform()
if vt is None:
return None
return vt.map(obj)
def mapRectToView(self, obj):
vt = self.viewTransform()
if vt is None:
return None
return vt.mapRect(obj)
def mapFromView(self, obj):
vt = self.viewTransform()
if vt is None:
return None
vt = fn.invertQTransform(vt)
return vt.map(obj)
def mapRectFromView(self, obj):
vt = self.viewTransform()
if vt is None:
return None
cache = self._mapRectFromViewGlobalCache
k = (
vt.m11(), vt.m12(), vt.m13(),
vt.m21(), vt.m22(), vt.m23(),
vt.m31(), vt.m32(), vt.m33(),
)
try:
inv_vt = cache[k]
except KeyError:
inv_vt = fn.invertQTransform(vt)
cache[k] = inv_vt
return inv_vt.mapRect(obj)
def pos(self):
return Point(self._qtBaseClass.pos(self))
def viewPos(self):
return self.mapToView(self.mapFromParent(self.pos()))
def parentItem(self):
## PyQt bug -- some items are returned incorrectly.
return GraphicsScene.translateGraphicsItem(self._qtBaseClass.parentItem(self))
def setParentItem(self, parent):
## Workaround for Qt bug: https://bugreports.qt-project.org/browse/QTBUG-18616
if parent is not None:
pscene = parent.scene()
if pscene is not None and self.scene() is not pscene:
pscene.addItem(self)
return self._qtBaseClass.setParentItem(self, parent)
def childItems(self):
## PyQt bug -- some child items are returned incorrectly.
return list(map(GraphicsScene.translateGraphicsItem, self._qtBaseClass.childItems(self)))
def sceneTransform(self):
        ## Qt bug: do not allow access to sceneTransform() until
## the item has a scene.
if self.scene() is None:
return self.transform()
else:
return self._qtBaseClass.sceneTransform(self)
def transformAngle(self, relativeItem=None):
"""Return the rotation produced by this item's transform (this assumes there is no shear in the transform)
If relativeItem is given, then the angle is determined relative to that item.
"""
if relativeItem is None:
relativeItem = self.parentItem()
tr = self.itemTransform(relativeItem)
if isinstance(tr, tuple): ## difference between pyside and pyqt
tr = tr[0]
#vec = tr.map(Point(1,0)) - tr.map(Point(0,0))
vec = tr.map(QtCore.QLineF(0,0,1,0))
#return Point(vec).angle(Point(1,0))
return vec.angleTo(QtCore.QLineF(vec.p1(), vec.p1()+QtCore.QPointF(1,0)))
#def itemChange(self, change, value):
#ret = self._qtBaseClass.itemChange(self, change, value)
#if change == self.ItemParentHasChanged or change == self.ItemSceneHasChanged:
#print "Item scene changed:", self
#self.setChildScene(self) ## This is bizarre.
#return ret
#def setChildScene(self, ch):
#scene = self.scene()
#for ch2 in ch.childItems():
#if ch2.scene() is not scene:
#print "item", ch2, "has different scene:", ch2.scene(), scene
#scene.addItem(ch2)
#QtGui.QApplication.processEvents()
#print " --> ", ch2.scene()
#self.setChildScene(ch2)
def parentChanged(self):
"""Called when the item's parent has changed.
This method handles connecting / disconnecting from ViewBox signals
to make sure viewRangeChanged works properly. It should generally be
extended, not overridden."""
self._updateView()
def _updateView(self):
## called to see whether this item has a new view to connect to
## NOTE: This is called from GraphicsObject.itemChange or GraphicsWidget.itemChange.
if not hasattr(self, '_connectedView'):
# Happens when Python is shutting down.
return
## It is possible this item has moved to a different ViewBox or widget;
## clear out previously determined references to these.
self.forgetViewBox()
self.forgetViewWidget()
## check for this item's current viewbox or view widget
view = self.getViewBox()
#if view is None:
##print " no view"
#return
oldView = None
if self._connectedView is not None:
oldView = self._connectedView()
if view is oldView:
#print " already have view", view
return
## disconnect from previous view
if oldView is not None:
for signal, slot in [('sigRangeChanged', self.viewRangeChanged),
('sigDeviceRangeChanged', self.viewRangeChanged),
('sigTransformChanged', self.viewTransformChanged),
('sigDeviceTransformChanged', self.viewTransformChanged)]:
try:
getattr(oldView, signal).disconnect(slot)
except (TypeError, AttributeError, RuntimeError):
# TypeError and RuntimeError are from pyqt and pyside, respectively
pass
self._connectedView = None
## connect to new view
if view is not None:
#print "connect:", self, view
if hasattr(view, 'sigDeviceRangeChanged'):
# connect signals from GraphicsView
view.sigDeviceRangeChanged.connect(self.viewRangeChanged)
view.sigDeviceTransformChanged.connect(self.viewTransformChanged)
else:
# connect signals from ViewBox
view.sigRangeChanged.connect(self.viewRangeChanged)
view.sigTransformChanged.connect(self.viewTransformChanged)
self._connectedView = weakref.ref(view)
self.viewRangeChanged()
self.viewTransformChanged()
## inform children that their view might have changed
self._replaceView(oldView)
self.viewChanged(view, oldView)
def viewChanged(self, view, oldView):
"""Called when this item's view has changed
(ie, the item has been added to or removed from a ViewBox)"""
pass
def _replaceView(self, oldView, item=None):
if item is None:
item = self
for child in item.childItems():
if isinstance(child, GraphicsItem):
if child.getViewBox() is oldView:
child._updateView()
#self._replaceView(oldView, child)
else:
self._replaceView(oldView, child)
def viewRangeChanged(self):
"""
Called whenever the view coordinates of the ViewBox containing this item have changed.
"""
pass
def viewTransformChanged(self):
"""
Called whenever the transformation matrix of the view has changed.
(eg, the view range has changed or the view was resized)
"""
pass
#def prepareGeometryChange(self):
#self._qtBaseClass.prepareGeometryChange(self)
#self.informViewBoundsChanged()
def informViewBoundsChanged(self):
"""
Inform this item's container ViewBox that the bounds of this item have changed.
This is used by ViewBox to react if auto-range is enabled.
"""
view = self.getViewBox()
if view is not None and hasattr(view, 'implements') and view.implements('ViewBox'):
view.itemBoundsChanged(self) ## inform view so it can update its range if it wants
def childrenShape(self):
"""Return the union of the shapes of all descendants of this item in local coordinates."""
        childs = self.allChildItems()
        shapes = [self.mapFromItem(c, c.shape()) for c in childs]
return reduce(operator.add, shapes)
def allChildItems(self, root=None):
"""Return list of the entire item tree descending from this item."""
if root is None:
root = self
tree = []
for ch in root.childItems():
tree.append(ch)
tree.extend(self.allChildItems(ch))
return tree
def setExportMode(self, export, opts=None):
"""
This method is called by exporters to inform items that they are being drawn for export
with a specific set of options. Items access these via self._exportOptions.
When exporting is complete, _exportOptions is set to False.
"""
if opts is None:
opts = {}
if export:
self._exportOpts = opts
#if 'antialias' not in opts:
#self._exportOpts['antialias'] = True
else:
self._exportOpts = False
#def update(self):
#self._qtBaseClass.update(self)
#print "Update:", self
def getContextMenus(self, event):
return [self.getMenu()] if hasattr(self, "getMenu") else []
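# --- Illustrative sketch (not part of pyqtgraph, and not Qt's API) ---
# mapRectFromView() above keys a small LRU cache on the nine coefficients of
# the view transform so the inversion runs only once per distinct transform.
# The same caching pattern, standalone, with functools.lru_cache and a plain
# 2x2-plus-translation affine inverse:
if __name__ == '__main__':
    from functools import lru_cache

    @lru_cache(maxsize=100)
    def invert_affine(m11, m12, m21, m22, dx, dy):
        det = m11 * m22 - m12 * m21
        i11, i12, i21, i22 = m22 / det, -m12 / det, -m21 / det, m11 / det
        # row-vector convention: p' = p * L + t  =>  p = p' * L^-1 - t * L^-1
        return (i11, i12, i21, i22, -(dx * i11 + dy * i21), -(dx * i12 + dy * i22))

    print(invert_affine(2.0, 0.0, 0.0, 2.0, 10.0, 0.0))  # (0.5, -0.0, -0.0, 0.5, -5.0, -0.0)
    print(invert_affine.cache_info())                    # a repeated call would be a cache hit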
|
the-stack_0_13248 | import pytest
from thedarn.rules.gradle_wrapper import match, get_new_command
from thedarn.types import Command
@pytest.fixture(autouse=True)
def exists(mocker):
return mocker.patch('thedarn.rules.gradle_wrapper.os.path.isfile',
return_value=True)
@pytest.mark.parametrize('command', [
Command('gradle tasks', 'gradle: not found'),
Command('gradle build', 'gradle: not found')])
def test_match(mocker, command):
mocker.patch('thedarn.rules.gradle_wrapper.which', return_value=None)
assert match(command)
@pytest.mark.parametrize('command, gradlew, which', [
(Command('gradle tasks', 'gradle: not found'), False, None),
(Command('gradle tasks', 'command not found'), True, '/usr/bin/gradle'),
(Command('npm tasks', 'npm: not found'), True, None)])
def test_not_match(mocker, exists, command, gradlew, which):
mocker.patch('thedarn.rules.gradle_wrapper.which', return_value=which)
exists.return_value = gradlew
assert not match(command)
@pytest.mark.parametrize('script, result', [
('gradle assemble', './gradlew assemble'),
('gradle --help', './gradlew --help'),
('gradle build -c', './gradlew build -c')])
def test_get_new_command(script, result):
command = Command(script, '')
assert get_new_command(command) == result
|
the-stack_0_13250 | import pygame
aktualnie_wyswietlane = [[0 for col in range(14)] for row in range(5)]
aktualna_podpowiedz = [[0 for col_ in range(14)] for row_ in range(5)]
class Kod(object):
def main(self, aktualny_kolor):
Kod.reset(self)
self.pos = pygame.mouse.get_pos()
if self.stop == 0:
if aktualnie_wyswietlane[1][self.wiersz] and aktualnie_wyswietlane[2][self.wiersz] and aktualnie_wyswietlane[3][self.wiersz] and aktualnie_wyswietlane[4][self.wiersz]:
enter = pygame.key.get_pressed()
if enter[13]:
self.enter += 1
Kod.wyswietlanie_pytajnikow(self)
Kod.wyswietlanie_kulek(self)
if aktualny_kolor == 6:
x_1, x_2 = 77, 111
if aktualny_kolor == 7:
x_1, x_2 = 57, 89
if aktualny_kolor == 8:
x_1, x_2 = 37, 70
x_pole_1, x_pole_2 = 156, 190
if self.click[0]:
pozycja = pygame.mouse.get_pos()
else:
pozycja = (-1, -1)
for pole in range(1, 5):
if pozycja[0] in range(x_pole_1, x_pole_2) and pozycja[1] in range(self.y_1, self.y_1 + 35):
self.click_pole = pole
x_pole_1 += 58
x_pole_2 += 58
# print(self.click_pole)
for kulka in range(1, aktualny_kolor + 1):
if pozycja[0] in range(x_1, x_2) and pozycja[1] in range(665, 700):
aktualnie_wyswietlane[self.click_pole][self.wiersz] = kulka
x_1 += 46
x_2 += 46
def kod_gry(self, aktualny_kolor):
self.kod_los = self.twoj_kod
dodatkowe_2 = 0
sprawdzenie = [0, 0, 0, 0]
for wiersz in range(0, self.click_runda * 2 + 6):
index = 1
tmp = [0, 0, 0, 0]
self.podpowiedz = []
if self.enter == wiersz:
self.podpowiedz.append(0)
if aktualnie_wyswietlane[1][wiersz + 1] == self.kod_los[0]:
self.podpowiedz.append(1)
index += 1
tmp.append(self.kod_los[0])
if aktualnie_wyswietlane[2][wiersz + 1] == self.kod_los[1]:
self.podpowiedz.append(1)
index += 1
tmp.append(self.kod_los[1])
if aktualnie_wyswietlane[3][wiersz + 1] == self.kod_los[2]:
self.podpowiedz.append(1)
index += 1
tmp.append(self.kod_los[2])
if aktualnie_wyswietlane[4][wiersz + 1] == self.kod_los[3]:
self.podpowiedz.append(1)
index += 1
tmp.append(self.kod_los[3])
self.kod_los_blad = self.kod_los.copy()
for kol_2 in range(1, 5):
sprawdzenie[kol_2-1] = aktualnie_wyswietlane[kol_2][wiersz + 1]
for kol in range(1, 5):
if sprawdzenie.count(aktualnie_wyswietlane[kol][wiersz + 1]) > tmp.count(aktualnie_wyswietlane[kol][wiersz + 1]):
dodatkowe_2 = self.kod_los_blad.count(aktualnie_wyswietlane[kol][wiersz + 1]) - tmp.count(aktualnie_wyswietlane[kol][wiersz + 1])
sprawdzenie.remove(aktualnie_wyswietlane[kol][wiersz + 1])
if dodatkowe_2 or (aktualnie_wyswietlane[kol][wiersz + 1] in self.kod_los_blad and not aktualnie_wyswietlane[kol][wiersz + 1] in tmp):
self.podpowiedz.append(2)
if self.kod_los_blad.count(aktualnie_wyswietlane[kol][wiersz + 1]):
self.kod_los_blad.remove(aktualnie_wyswietlane[kol][wiersz + 1])
dodatkowe_2 = 0
#print("podp=",self.podpowiedz, "tmp=",tmp, "sprawdz=",sprawdzenie, "blad=",self.kod_los_blad)
while index <= 5:
self.podpowiedz.append(0)
index += 1
for kolumna in range(1, 5):
if wiersz == self.enter and self.podpowiedz[kolumna] == 0:
aktualna_podpowiedz[kolumna][wiersz + 1] = 0
if self.podpowiedz[kolumna] == 1:
aktualna_podpowiedz[kolumna][wiersz + 1] = 1
if self.podpowiedz[kolumna] == 2:
aktualna_podpowiedz[kolumna][wiersz + 1] = 2
Kod.wyswietlanie_podpowiedzi(self)
Kod.czy_wygrana(self)
def wyswietlanie_kulek(self):
czerwona = pygame.image.load("Obrazy/kulki/czerwona.png")
zielona = pygame.image.load("Obrazy/kulki/zielona.png")
niebieska = pygame.image.load("Obrazy/kulki/niebieska.png")
blekitna = pygame.image.load("Obrazy/kulki/blekitna.png")
rozowa = pygame.image.load("Obrazy/kulki/rozowa.png")
zolta = pygame.image.load("Obrazy/kulki/zolta.png")
szara = pygame.image.load("Obrazy/kulki/szara.png")
czarna = pygame.image.load("Obrazy/kulki/czarna.png")
self.y = 571
for wysokosc in range(1, 14):
x = 156
for xz in range(1, 5):
if aktualnie_wyswietlane[xz][wysokosc] == 1:
self.screen.blit(czerwona, (x, self.y))
elif aktualnie_wyswietlane[xz][wysokosc] == 2:
self.screen.blit(zielona, (x, self.y))
elif aktualnie_wyswietlane[xz][wysokosc] == 3:
self.screen.blit(niebieska, (x, self.y))
elif aktualnie_wyswietlane[xz][wysokosc] == 4:
self.screen.blit(blekitna, (x, self.y))
elif aktualnie_wyswietlane[xz][wysokosc] == 5:
self.screen.blit(rozowa, (x, self.y))
elif aktualnie_wyswietlane[xz][wysokosc] == 6:
self.screen.blit(zolta, (x, self.y))
elif aktualnie_wyswietlane[xz][wysokosc] == 7:
self.screen.blit(szara, (x, self.y))
elif aktualnie_wyswietlane[xz][wysokosc] == 8:
self.screen.blit(czarna, (x, self.y))
x += 58
self.y -= 50
def wyswietlanie_pytajnikow(self):
pytajnik = pygame.image.load("Obrazy/kulki/pytajnik2.png")
for wiersz_2 in range(1, self.click_runda * 2 + 6):
if self.enter == wiersz_2:
self.y_1 = 571 - 50 * wiersz_2
                self.wiersz = wiersz_2 + 1  # self.wiersz - which row comes after the enter press
self.screen.blit(pytajnik, (156, 571 - 50 * wiersz_2))
self.screen.blit(pytajnik, (214, 571 - 50 * wiersz_2))
self.screen.blit(pytajnik, (272, 571 - 50 * wiersz_2))
self.screen.blit(pytajnik, (330, 571 - 50 * wiersz_2))
elif self.enter == 0:
self.screen.blit(pytajnik, (156, 571))
self.screen.blit(pytajnik, (214, 571))
self.screen.blit(pytajnik, (272, 571))
self.screen.blit(pytajnik, (330, 571))
def wyswietlanie_podpowiedzi(self):
mala_czarna = pygame.image.load("Obrazy/kulki/mala_czarna.png")
mala_biala = pygame.image.load("Obrazy/kulki/mala_biala.png")
for wysokos_2 in range(1, 14):
if self.enter + 1 == wysokos_2:
continue
if aktualna_podpowiedz[1][wysokos_2] == 1:
self.screen.blit(mala_czarna, (37, 623 - 50 * wysokos_2))
elif aktualna_podpowiedz[1][wysokos_2] == 2:
self.screen.blit(mala_biala, (37, 623 - 50 * wysokos_2))
if aktualna_podpowiedz[2][wysokos_2] == 1:
self.screen.blit(mala_czarna, (61, 623 - 50 * wysokos_2))
elif aktualna_podpowiedz[2][wysokos_2] == 2:
self.screen.blit(mala_biala, (61, 623 - 50 * wysokos_2))
if aktualna_podpowiedz[3][wysokos_2] == 1:
self.screen.blit(mala_czarna, (37, 647 - 50 * wysokos_2))
elif aktualna_podpowiedz[3][wysokos_2] == 2:
self.screen.blit(mala_biala, (37, 647 - 50 * wysokos_2))
if aktualna_podpowiedz[4][wysokos_2] == 1:
self.screen.blit(mala_czarna, (61, 647 - 50 * wysokos_2))
elif aktualna_podpowiedz[4][wysokos_2] == 2:
self.screen.blit(mala_biala, (61, 647 - 50 * wysokos_2))
def czy_wygrana(self):
wygrana_w_ostatiej = 0
for wiersz in range(1, 14):
if aktualna_podpowiedz[1][wiersz] == 1 and aktualna_podpowiedz[2][wiersz] == 1 :
if aktualna_podpowiedz[3][wiersz] == 1 and aktualna_podpowiedz[4][wiersz] == 1:
if self.enter + 1 == wiersz:
continue
wygrana_w_ostatiej = 1
wygrana = pygame.image.load("Obrazy/wygrana.png")
for q in range(1,5):
for p in range(0, self.click_runda * 2 + 7):
aktualnie_wyswietlane[q][p] = self.kod_los[q - 1]
self.screen.blit(wygrana, (0, 300))
self.stop = 1
if self.wstecz == 1:
self.stop = 0
self.aktualnie_wyswietlane = 1
self.mozna_grac = 0
self.twoj_kod = [0, 0, 0, 0]
self.reset = 1
Kod.reset(self)
if self.enter == self.click_runda * 2 + 6 and wygrana_w_ostatiej == 0:
przegrana = pygame.image.load("Obrazy/przegrana.png")
self.screen.blit(przegrana, (0, 300))
for q in range(1, 5):
for p in range(0, self.click_runda * 2 + 7):
aktualnie_wyswietlane[q][p] = self.kod_los[q - 1]
self.stop = 1
if self.wstecz == 1:
self.stop = 0
self.aktualnie_wyswietlane = 1
self.reset = 1
Kod.reset(self)
def reset(self):
if self.reset == 1:
for iksy in range(1, 5):
for igreki in range(1, 14):
aktualnie_wyswietlane[iksy][igreki] = 0
aktualna_podpowiedz[iksy][igreki] = 0
self.click_vs = 0
self.click_runda = 0
self.aktualny_kolor = 0
self.aktualny_vs = 0
self.click_kolor = 0
self.wstecz = 0
self.click_miejsce = 0
self.click = pygame.mouse.get_pressed()
self.click_pole = 0
self.enter = 0
self.y_1 = 571
self.yz = 1
self.wiersz = 1
self.kod_los = [0, 0, 0, 0]
self.reset = 0
self.mozna_grac = 0
self.twoj_kod = [0, 0, 0, 0]
self.mozna_grac = 0
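# --- Illustrative helper (not used by the game classes above) ---
# kod_gry() computes the classic Mastermind feedback: a "1" (black peg) per
# colour in the right position and a "2" (white peg) per colour that is
# present but misplaced. The equivalent scoring, without the per-row
# bookkeeping, in a standalone function:
def _demo_score(secret, guess):
    from collections import Counter
    blacks = sum(s == g for s, g in zip(secret, guess))
    whites = sum((Counter(secret) & Counter(guess)).values()) - blacks
    return blacks, whites

# _demo_score([1, 2, 3, 4], [1, 3, 2, 6]) -> (1, 2)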
class Ustawianie_Kodu(object):
def main(self, ilosc_kolorow):
ustawiony_kod = pygame.image.load("Obrazy/ukladanie_kodu.png")
self.screen.blit(ustawiony_kod, (0,0))
if ilosc_kolorow == 6:
Ustawianie_Kodu.kulki_szesc(self)
x_1, x_2 = 77, 111
elif ilosc_kolorow == 7:
Ustawianie_Kodu.kulki_siedem(self)
x_1, x_2 = 57, 89
elif ilosc_kolorow == 8:
Ustawianie_Kodu.kulki_osiem(self)
x_1, x_2 = 37, 70
x_pole_1, x_pole_2 = 122, 155
if self.click[0]:
pozycja = pygame.mouse.get_pos()
else:
pozycja = (-1, -1)
        # Clicking on the "your code" slots
for pole in range(0, 4):
if pozycja[0] in range(x_pole_1, x_pole_2) and pozycja[1] in range(546, 580):
self.click_pole = pole
x_pole_1 += 49
x_pole_2 += 49
        # clicking on the colour list
for kulka in range(1, ilosc_kolorow + 1):
if pozycja[0] in range(x_1, x_2) and pozycja[1] in range(665, 700):
self.twoj_kod[self.click_pole] = kulka
x_1 += 46
x_2 += 46
czerwona = pygame.image.load("Obrazy/kulki/czerwona.png")
zielona = pygame.image.load("Obrazy/kulki/zielona.png")
niebieska = pygame.image.load("Obrazy/kulki/niebieska.png")
blekitna = pygame.image.load("Obrazy/kulki/blekitna.png")
rozowa = pygame.image.load("Obrazy/kulki/rozowa.png")
zolta = pygame.image.load("Obrazy/kulki/zolta.png")
szara = pygame.image.load("Obrazy/kulki/szara.png")
czarna = pygame.image.load("Obrazy/kulki/czarna.png")
x = 122
for numer in range(4):
if self.twoj_kod[numer] == 1:
self.screen.blit(czerwona, (x, 546))
if self.twoj_kod[numer] == 2:
self.screen.blit(zielona, (x, 546))
if self.twoj_kod[numer] == 3:
self.screen.blit(niebieska, (x, 546))
if self.twoj_kod[numer] == 4:
self.screen.blit(blekitna, (x, 546))
if self.twoj_kod[numer] == 5:
self.screen.blit(rozowa, (x, 546))
if self.twoj_kod[numer] == 6:
self.screen.blit(zolta, (x, 546))
if self.twoj_kod[numer] == 7:
self.screen.blit(szara, (x, 546))
if self.twoj_kod[numer] == 8:
self.screen.blit(czarna, (x, 546))
x += 49
if self.stop == 0:
if self.twoj_kod[0] and self.twoj_kod[1] and self.twoj_kod[2] and self.twoj_kod[3]:
enter = pygame.key.get_pressed()
if enter[13]:
self.mozna_grac = 1
if self.wstecz == 1:
wyjscie = pygame.image.load("Obrazy/wyjsc.png")
self.screen.blit(wyjscie, (0, 300))
self.stop = 1
def kulki_szesc(self):
k_6 = pygame.image.load("Obrazy/kulki_6.png")
self.screen.blit(k_6, (62, 650))
def kulki_siedem(self):
k_7 = pygame.image.load("Obrazy/kulki_7.png")
self.screen.blit(k_7, (40, 650))
def kulki_osiem(self):
k_8 = pygame.image.load("Obrazy/kulki_8.png")
self.screen.blit(k_8, (21, 650)) |
the-stack_0_13253 | import setuptools
#README as long_descriptions
with open("README.md", "r") as readme:
long_description = readme.read()
setuptools.setup(
name='automatedweb',
version='1.0.2',
url='https://github.com/renanleonellocastro/automatedweb.git',
license='MIT License',
author='Renan Leonello Castro',
author_email='[email protected]',
keywords='automatedweb web automated post get requests rest restfull',
description='A tool to make it easy to communicate with web systems writting and reading data from them',
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
py_modules=["automatedweb"],
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
install_requires=[
"requests>=2.22.0",
"pyquery>=1.4.0",
"json5>=0.8.5",
"urllib3>=1.25.10",
],
)
|
the-stack_0_13255 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
idp_table = sql.Table(
'identity_provider',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('enabled', sql.Boolean, nullable=False),
sql.Column('description', sql.Text(), nullable=True))
idp_table.create(migrate_engine, checkfirst=True)
federation_protocol_table = sql.Table(
'federation_protocol',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('idp_id', sql.String(64),
sql.ForeignKey('identity_provider.id', ondelete='CASCADE'),
primary_key=True),
sql.Column('mapping_id', sql.String(64), nullable=True))
federation_protocol_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
tables = ['identity_provider', 'federation_protocol']
for table_name in tables:
table = sql.Table(table_name, meta, autoload=True)
table.drop()
|
the-stack_0_13257 | import argparse
import collections
import json
import os
import re
import string
import sys
from copy import deepcopy
from bs4 import BeautifulSoup
class EvalOpts:
r"""
The options which the matrix evaluation process needs.
Arguments:
data_file (str): the SQuAD-style json file of the dataset in evaluation.
root_dir (str): the root directory of the raw WebSRC dataset, which contains the HTML files.
pred_file (str): the prediction file which contain the best predicted answer text of each question from the
model.
tag_pred_file (str): the prediction file which contain the best predicted answer tag id of each question from
the model.
result_file (str): the file to write down the matrix evaluation results of each question.
out_file (str): the file to write down the final matrix evaluation results of the whole dataset.
"""
def __init__(self, data_file, root_dir, pred_file, tag_pred_file, result_file='', out_file=""):
self.data_file = data_file
self.root_dir = root_dir
self.pred_file = pred_file
self.tag_pred_file = tag_pred_file
self.result_file = result_file
self.out_file = out_file
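# Typical usage (illustrative; the file names below are placeholders):
#   opts = EvalOpts("dev.json", "./data", "pred.json", "tag_pred.json",
#                   result_file="qas_eval.json", out_file="eval.json")
#   main(opts)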
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
parser.add_argument('root_dir', metavar='./data', help='The root directory of the raw WebSRC dataset')
parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
parser.add_argument('tag_pred_file', metavar='tag_pred.json', help='Model predictions.')
parser.add_argument('--result-file', '-r', metavar='qas_eval.json')
parser.add_argument('--out-file', '-o', metavar='eval.json',
help='Write accuracy metrics to file (default is stdout).')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def make_pages_list(dataset):
r"""
Record all the pages which appears in the dataset and return the list.
"""
pages_list = []
last_page = None
for domain in dataset:
for w in domain['websites']:
for qa in w['qas']:
if last_page != qa['id'][:4]:
last_page = qa['id'][:4]
pages_list.append(last_page)
return pages_list
def make_qid_to_has_ans(dataset):
r"""
Pick all the questions which has answer in the dataset and return the list.
"""
qid_to_has_ans = {}
for domain in dataset:
for w in domain['websites']:
for qa in w['qas']:
qid_to_has_ans[qa['id']] = bool(qa['answers'])
return qid_to_has_ans
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
r"""
Get the word list in the input.
"""
if not s:
return []
return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
r"""
Calculate the exact match.
"""
if normalize_answer(a_gold) == normalize_answer(a_pred):
return 1
return 0
def compute_f1(a_gold, a_pred):
r"""
Calculate the f1 score.
"""
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
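# Worked example (illustrative): articles are stripped before the token
# overlap is computed, so partially overlapping answers get partial credit:
#   gold "a small red car" -> ["small", "red", "car"]
#   pred "red car outside" -> ["red", "car", "outside"]
#   precision = recall = 2/3, so compute_f1(gold, pred) == 2/3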
def compute_pos(f, t_gold, addition, t_pred):
r"""
Calculate the POS score.
Arguments:
f (str): the html file on which the question is based.
t_gold (int): the gold answer tag id provided by the dataset (the value correspond to the key element_id).
addition (int): the addition information used for yes/no question provided by the dataset (the value
corresponding to the key answer_start).
t_pred (list[int]): the tag ids of the tags corresponding the each word in the predicted answer.
Returns:
float: the POS score.
"""
h = BeautifulSoup(open(f), "lxml")
p_gold, e_gold = set(), h.find(tid=t_gold)
if e_gold is None:
if len(t_pred) != 1:
return 0
else:
t = t_pred[0]
e_pred, e_prev = h.find(tid=t), h.find(tid=t-1)
if (e_pred is not None) or (addition == 1 and e_prev is not None) or\
(addition == 0 and e_prev is None):
return 0
else:
return 1
else:
p_gold.add(e_gold['tid'])
for e in e_gold.parents:
if int(e['tid']) < 2:
break
p_gold.add(e['tid'])
p = None
for t in t_pred:
p_pred, e_pred = set(), h.find(tid=t)
if e_pred is not None:
p_pred.add(e_pred['tid'])
if e_pred.name != 'html':
for e in e_pred.parents:
if int(e['tid']) < 2:
break
p_pred.add(e['tid'])
else:
p_pred.add(str(t))
if p is None:
p = p_pred
else:
                p = p & p_pred  # intersect the ancestor sets of the predicted tags (excluding html & body)
return len(p_gold & p) / len(p_gold | p)
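# --- Illustrative sketch (simplified: no HTML parsing, no yes/no special case) ---
# The POS score above is the Jaccard overlap between the gold tag's ancestor
# path and the common ancestor path shared by every predicted tag. The same
# idea on plain tag-id sets:
def _demo_pos(gold_path, pred_paths):
    common = set.intersection(*(set(p) for p in pred_paths))
    gold = set(gold_path)
    return len(gold & common) / len(gold | common)

# _demo_pos([5, 4, 2], [[6, 4, 2], [7, 4, 2]]) -> 2/3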
def get_raw_scores(dataset, preds, tag_preds, root_dir):
r"""
Calculate all the three matrix (exact match, f1, POS) for each question.
Arguments:
dataset (dict): the dataset in use.
preds (dict): the answer text prediction for each question in the dataset.
tag_preds (dict): the answer tags prediction for each question in the dataset.
root_dir (str): the base directory for the html files.
Returns:
tuple(dict, dict, dict): exact match, f1, pos scores for each question.
"""
exact_scores = {}
f1_scores = {}
pos_scores = {}
for websites in dataset:
for w in websites['websites']:
f = os.path.join(root_dir, websites['domain'], w['page_id'][0:2], 'processed_data',
w['page_id'] + '.html')
for qa in w['qas']:
qid = qa['id']
gold_answers = [a['text'] for a in qa['answers']
if normalize_answer(a['text'])]
gold_tag_answers = [a['element_id'] for a in qa['answers']]
additional_tag_information = [a['answer_start'] for a in qa['answers']]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = ['']
if qid not in preds:
print('Missing prediction for %s' % qid)
continue
a_pred, t_pred = preds[qid], tag_preds[qid]
# Take max over all gold answers
exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
pos_scores[qid] = max(compute_pos(f, t, a, t_pred)
for t, a in zip(gold_tag_answers, additional_tag_information))
return exact_scores, f1_scores, pos_scores
def make_eval_dict(exact_scores, f1_scores, pos_scores, qid_list=None):
r"""
Make the dictionary to show the evaluation results.
"""
if qid_list is None:
total = len(exact_scores)
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores.values()) / total),
('f1', 100.0 * sum(f1_scores.values()) / total),
('pos', 100.0 * sum(pos_scores.values()) / total),
('total', total),
])
else:
total = len(qid_list)
if total == 0:
return collections.OrderedDict([
('exact', 0),
('f1', 0),
('pos', 0),
('total', 0),
])
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
('pos', 100.0 * sum(pos_scores[k] for k in qid_list) / total),
('total', total),
])
def merge_eval(main_eval, new_eval, prefix):
for k in new_eval:
main_eval['%s_%s' % (prefix, k)] = new_eval[k]
def main(opts):
with open(opts.data_file) as f:
dataset_json = json.load(f)
dataset = dataset_json['data']
if isinstance(opts.pred_file, str):
with open(opts.pred_file) as f:
preds = json.load(f)
else:
preds = opts.pred_file
if isinstance(opts.tag_pred_file, str):
with open(opts.tag_pred_file) as f:
tag_preds = json.load(f)
else:
tag_preds = opts.tag_pred_file
qid_to_has_ans = make_qid_to_has_ans(dataset)
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact, f1, pos = get_raw_scores(dataset, preds, tag_preds, opts.root_dir)
out_eval = make_eval_dict(exact, f1, pos)
if has_ans_qids:
has_ans_eval = make_eval_dict(exact, f1, pos, qid_list=has_ans_qids)
merge_eval(out_eval, has_ans_eval, 'HasAns')
if no_ans_qids:
no_ans_eval = make_eval_dict(exact, f1, pos, qid_list=no_ans_qids)
merge_eval(out_eval, no_ans_eval, 'NoAns')
print(json.dumps(out_eval, indent=2))
pages_list, write_eval = make_pages_list(dataset), deepcopy(out_eval)
for p in pages_list:
pages_ans_qids = [k for k, _ in qid_to_has_ans.items() if p in k]
page_eval = make_eval_dict(exact, f1, pos, qid_list=pages_ans_qids)
merge_eval(write_eval, page_eval, p)
if opts.result_file:
with open(opts.result_file, 'w') as f:
w = {}
for k, v in qid_to_has_ans.items():
w[k] = {'exact': exact[k], 'f1': f1[k], 'pos': pos[k]}
json.dump(w, f)
if opts.out_file:
with open(opts.out_file, 'w') as f:
json.dump(write_eval, f)
return out_eval
if __name__ == '__main__':
a="$4.99"
b="$4.99"
print(compute_exact(a,b))
|
the-stack_0_13259 | import asyncio
from collections import defaultdict, deque
from collections.abc import Mapping, Set
from contextlib import suppress
from datetime import timedelta
from functools import partial
import inspect
import itertools
import json
import logging
import math
from numbers import Number
import operator
import os
import pickle
import random
import warnings
import weakref
import psutil
import sortedcontainers
from tlz import (
frequencies,
merge,
pluck,
merge_sorted,
first,
merge_with,
valmap,
second,
compose,
groupby,
concat,
)
from tornado.ioloop import IOLoop, PeriodicCallback
import dask
from . import profile
from .batched import BatchedSend
from .comm import (
normalize_address,
resolve_address,
get_address_host,
unparse_host_port,
)
from .comm.addressing import addresses_from_user_args
from .core import rpc, send_recv, clean_exception, CommClosedError, Status
from .diagnostics.plugin import SchedulerPlugin
from .http import get_handlers
from .metrics import time
from .node import ServerNode
from . import preloading
from .proctitle import setproctitle
from .security import Security
from .utils import (
All,
get_fileno_limit,
log_errors,
key_split,
validate_key,
no_default,
parse_timedelta,
parse_bytes,
shutting_down,
key_split_group,
empty_context,
tmpfile,
format_bytes,
format_time,
TimeoutError,
)
from .utils_comm import scatter_to_workers, gather_from_workers, retry_operation
from .utils_perf import enable_gc_diagnosis, disable_gc_diagnosis
from . import versions as version_module
from .publish import PublishExtension
from .queues import QueueExtension
from .semaphore import SemaphoreExtension
from .recreate_exceptions import ReplayExceptionScheduler
from .lock import LockExtension
from .event import EventExtension
from .pubsub import PubSubSchedulerExtension
from .stealing import WorkStealing
from .variable import VariableExtension
logger = logging.getLogger(__name__)
LOG_PDB = dask.config.get("distributed.admin.pdb-on-err")
DEFAULT_DATA_SIZE = parse_bytes(
dask.config.get("distributed.scheduler.default-data-size")
)
DEFAULT_EXTENSIONS = [
LockExtension,
PublishExtension,
ReplayExceptionScheduler,
QueueExtension,
VariableExtension,
PubSubSchedulerExtension,
SemaphoreExtension,
EventExtension,
]
ALL_TASK_STATES = {"released", "waiting", "no-worker", "processing", "erred", "memory"}
class ClientState:
"""
A simple object holding information about a client.
.. attribute:: client_key: str
A unique identifier for this client. This is generally an opaque
string generated by the client itself.
.. attribute:: wants_what: {TaskState}
A set of tasks this client wants kept in memory, so that it can
download its result when desired. This is the reverse mapping of
:class:`TaskState.who_wants`.
Tasks are typically removed from this set when the corresponding
object in the client's space (for example a ``Future`` or a Dask
collection) gets garbage-collected.
"""
__slots__ = ("client_key", "wants_what", "last_seen", "versions")
def __init__(self, client, versions=None):
self.client_key = client
self.wants_what = set()
self.last_seen = time()
self.versions = versions or {}
def __repr__(self):
return "<Client %r>" % (self.client_key,)
def __str__(self):
return self.client_key
class WorkerState:
"""
A simple object holding information about a worker.
.. attribute:: address
This worker's unique key. This can be its connected address
(such as ``'tcp://127.0.0.1:8891'``) or an alias (such as ``'alice'``).
.. attribute:: processing: {TaskState: cost}
A dictionary of tasks that have been submitted to this worker.
       Each task state is associated with the expected cost in seconds
of running that task, summing both the task's expected computation
time and the expected communication time of its result.
Multiple tasks may be submitted to a worker in advance and the worker
will run them eventually, depending on its execution resources
(but see :doc:`work-stealing`).
All the tasks here are in the "processing" state.
This attribute is kept in sync with :attr:`TaskState.processing_on`.
.. attribute:: has_what: {TaskState}
The set of tasks which currently reside on this worker.
All the tasks here are in the "memory" state.
This is the reverse mapping of :class:`TaskState.who_has`.
.. attribute:: nbytes: int
The total memory size, in bytes, used by the tasks this worker
holds in memory (i.e. the tasks in this worker's :attr:`has_what`).
.. attribute:: nthreads: int
The number of CPU threads made available on this worker.
.. attribute:: resources: {str: Number}
The available resources on this worker like ``{'gpu': 2}``.
These are abstract quantities that constrain certain tasks from
running at the same time on this worker.
.. attribute:: used_resources: {str: Number}
The sum of each resource used by all tasks allocated to this worker.
The numbers in this dictionary can only be less or equal than
those in this worker's :attr:`resources`.
.. attribute:: occupancy: Number
The total expected runtime, in seconds, of all tasks currently
processing on this worker. This is the sum of all the costs in
this worker's :attr:`processing` dictionary.
.. attribute:: status: str
The current status of the worker, either ``'running'`` or ``'closed'``
.. attribute:: nanny: str
Address of the associated Nanny, if present
.. attribute:: last_seen: Number
The last time we received a heartbeat from this worker, in local
scheduler time.
.. attribute:: actors: {TaskState}
A set of all TaskStates on this worker that are actors. This only
includes those actors whose state actually lives on this worker, not
actors to which this worker has a reference.
"""
# XXX need a state field to signal active/removed?
__slots__ = (
"actors",
"address",
"bandwidth",
"extra",
"has_what",
"last_seen",
"local_directory",
"memory_limit",
"metrics",
"name",
"nanny",
"nbytes",
"nthreads",
"occupancy",
"pid",
"processing",
"resources",
"services",
"_status",
"time_delay",
"used_resources",
"versions",
)
def __init__(
self,
address=None,
pid=0,
name=None,
nthreads=0,
memory_limit=0,
local_directory=None,
services=None,
versions=None,
nanny=None,
extra=None,
):
self.address = address
self.pid = pid
self.name = name
self.nthreads = nthreads
self.memory_limit = memory_limit
self.local_directory = local_directory
self.services = services or {}
self.versions = versions or {}
self.nanny = nanny
self._status = Status.running
self.nbytes = 0
self.occupancy = 0
self.metrics = {}
self.last_seen = 0
self.time_delay = 0
self.bandwidth = parse_bytes(dask.config.get("distributed.scheduler.bandwidth"))
self.actors = set()
self.has_what = set()
self.processing = {}
self.resources = {}
self.used_resources = {}
self.extra = extra or {}
def __hash__(self):
return hash(self.address)
def __eq__(self, other):
return type(self) == type(other) and self.address == other.address
@property
def status(self):
return self._status
@status.setter
def status(self, new_status):
if isinstance(new_status, Status):
self._status = new_status
elif isinstance(new_status, str) or new_status is None:
corresponding_enum_variants = [s for s in Status if s.value == new_status]
assert len(corresponding_enum_variants) == 1
self._status = corresponding_enum_variants[0]
@property
def host(self):
return get_address_host(self.address)
def clean(self):
""" Return a version of this object that is appropriate for serialization """
ws = WorkerState(
address=self.address,
pid=self.pid,
name=self.name,
nthreads=self.nthreads,
memory_limit=self.memory_limit,
local_directory=self.local_directory,
services=self.services,
nanny=self.nanny,
extra=self.extra,
)
ws.processing = {ts.key for ts in self.processing}
return ws
def __repr__(self):
return "<Worker %r, name: %s, memory: %d, processing: %d>" % (
self.address,
self.name,
len(self.has_what),
len(self.processing),
)
def identity(self):
return {
"type": "Worker",
"id": self.name,
"host": self.host,
"resources": self.resources,
"local_directory": self.local_directory,
"name": self.name,
"nthreads": self.nthreads,
"memory_limit": self.memory_limit,
"last_seen": self.last_seen,
"services": self.services,
"metrics": self.metrics,
"nanny": self.nanny,
**self.extra,
}
@property
def ncores(self):
warnings.warn("WorkerState.ncores has moved to WorkerState.nthreads")
return self.nthreads
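# --- Illustrative sketch (plain stand-ins, not real scheduler state) ---
# WorkerState.has_what and TaskState.who_has are documented above as reverse
# mappings of each other, so state transitions always update them as a pair:
if __name__ == "__main__":
    class _DemoWorker:
        def __init__(self):
            self.has_what = set()

    class _DemoTask:
        def __init__(self):
            self.who_has = set()

    w, t = _DemoWorker(), _DemoTask()
    w.has_what.add(t)  # the task's result now lives on this worker ...
    t.who_has.add(w)   # ... and the reverse mapping is kept in sync
    print(t in w.has_what and w in t.who_has)  # True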
class TaskState:
"""
A simple object holding information about a task.
.. attribute:: key: str
The key is the unique identifier of a task, generally formed
from the name of the function, followed by a hash of the function
and arguments, like ``'inc-ab31c010444977004d656610d2d421ec'``.
.. attribute:: prefix: TaskPrefix
The broad class of tasks to which this task belongs like "inc" or
"read_csv"
.. attribute:: run_spec: object
A specification of how to run the task. The type and meaning of this
value is opaque to the scheduler, as it is only interpreted by the
worker to which the task is sent for executing.
As a special case, this attribute may also be ``None``, in which case
the task is "pure data" (such as, for example, a piece of data loaded
in the scheduler using :meth:`Client.scatter`). A "pure data" task
cannot be computed again if its value is lost.
.. attribute:: priority: tuple
The priority provides each task with a relative ranking which is used
to break ties when many tasks are being considered for execution.
This ranking is generally a 2-item tuple. The first (and dominant)
item corresponds to when it was submitted. Generally, earlier tasks
take precedence. The second item is determined by the client, and is
a way to prioritize tasks within a large graph that may be important,
such as if they are on the critical path, or good to run in order to
release many dependencies. This is explained further in
:doc:`Scheduling Policy <scheduling-policies>`.
.. attribute:: state: str
This task's current state. Valid states include ``released``,
``waiting``, ``no-worker``, ``processing``, ``memory``, ``erred``
and ``forgotten``. If it is ``forgotten``, the task isn't stored
in the ``tasks`` dictionary anymore and will probably disappear
soon from memory.
.. attribute:: dependencies: {TaskState}
The set of tasks this task depends on for proper execution. Only
tasks still alive are listed in this set. If, for whatever reason,
this task also depends on a forgotten task, the
:attr:`has_lost_dependencies` flag is set.
A task can only be executed once all its dependencies have already
been successfully executed and have their result stored on at least
one worker. This is tracked by progressively draining the
:attr:`waiting_on` set.
.. attribute:: dependents: {TaskState}
The set of tasks which depend on this task. Only tasks still alive
are listed in this set.
This is the reverse mapping of :attr:`dependencies`.
.. attribute:: has_lost_dependencies: bool
Whether any of the dependencies of this task has been forgotten.
For memory consumption reasons, forgotten tasks are not kept in
memory even though they may have dependent tasks. When a task is
forgotten, therefore, each of its dependents has their
:attr:`has_lost_dependencies` attribute set to ``True``.
If :attr:`has_lost_dependencies` is true, this task cannot go
into the "processing" state anymore.
.. attribute:: waiting_on: {TaskState}
The set of tasks this task is waiting on *before* it can be executed.
This is always a subset of :attr:`dependencies`. Each time one of the
dependencies has finished processing, it is removed from the
:attr:`waiting_on` set.
Once :attr:`waiting_on` becomes empty, this task can move from the
"waiting" state to the "processing" state (unless one of the
dependencies errored out, in which case this task is instead
marked "erred").
.. attribute:: waiters: {TaskState}
The set of tasks which need this task to remain alive. This is always
a subset of :attr:`dependents`. Each time one of the dependents
has finished processing, it is removed from the :attr:`waiters`
set.
Once both :attr:`waiters` and :attr:`who_wants` become empty, this
task can be released (if it has a non-empty :attr:`run_spec`) or
forgotten (otherwise) by the scheduler, and by any workers
in :attr:`who_has`.
.. note:: Counter-intuitively, :attr:`waiting_on` and
:attr:`waiters` are not reverse mappings of each other.
.. attribute:: who_wants: {ClientState}
The set of clients who want this task's result to remain alive.
This is the reverse mapping of :attr:`ClientState.wants_what`.
When a client submits a graph to the scheduler it also specifies
which output tasks it desires, such that their results are not released
from memory.
Once a task has finished executing (i.e. moves into the "memory"
or "erred" state), the clients in :attr:`who_wants` are notified.
Once both :attr:`waiters` and :attr:`who_wants` become empty, this
task can be released (if it has a non-empty :attr:`run_spec`) or
forgotten (otherwise) by the scheduler, and by any workers
in :attr:`who_has`.
.. attribute:: who_has: {WorkerState}
The set of workers who have this task's result in memory.
It is non-empty iff the task is in the "memory" state. There can be
more than one worker in this set if, for example, :meth:`Client.scatter`
or :meth:`Client.replicate` was used.
This is the reverse mapping of :attr:`WorkerState.has_what`.
.. attribute:: processing_on: WorkerState (or None)
If this task is in the "processing" state, which worker is currently
processing it. Otherwise this is ``None``.
This attribute is kept in sync with :attr:`WorkerState.processing`.
.. attribute:: retries: int
The number of times this task can automatically be retried in case
of failure. If a task fails executing (the worker returns with
an error), its :attr:`retries` attribute is checked. If it is
equal to 0, the task is marked "erred". If it is greater than 0,
the :attr:`retries` attribute is decremented and execution is
attempted again.
.. attribute:: nbytes: int (or None)
The number of bytes, as determined by ``sizeof``, of the result
of a finished task. This number is used for diagnostics and to
help prioritize work.
.. attribute:: type: str
The type of the object as a string. Only present for tasks that have
been computed.
.. attribute:: exception: object
If this task failed executing, the exception object is stored here.
Otherwise this is ``None``.
.. attribute:: traceback: object
If this task failed executing, the traceback object is stored here.
Otherwise this is ``None``.
.. attribute:: exception_blame: TaskState (or None)
If this task or one of its dependencies failed executing, the
failed task is stored here (possibly itself). Otherwise this
is ``None``.
.. attribute:: suspicious: int
The number of times this task has been involved in a worker death.
Some tasks may cause workers to die (such as calling ``os._exit(0)``).
When a worker dies, all of the tasks on that worker are reassigned
to others. This combination of behaviors can cause a bad task to
catastrophically destroy all workers on the cluster, one after
another. Whenever a worker dies, we mark each task currently
processing on that worker (as recorded by
:attr:`WorkerState.processing`) as suspicious.
If a task is involved in three deaths (or some other fixed constant)
then we mark the task as ``erred``.
.. attribute:: host_restrictions: {hostnames}
A set of hostnames where this task can be run (or ``None`` if empty).
Usually this is empty unless the task has been specifically restricted
to only run on certain hosts. A hostname may correspond to one or
several connected workers.
.. attribute:: worker_restrictions: {worker addresses}
A set of complete worker addresses where this can be run (or ``None``
if empty). Usually this is empty unless the task has been specifically
restricted to only run on certain workers.
Note this is tracking worker addresses, not worker states, since
the specific workers may not be connected at this time.
.. attribute:: resource_restrictions: {resource: quantity}
Resources required by this task, such as ``{'gpu': 1}`` or
``{'memory': 1e9}`` (or ``None`` if empty). These are user-defined
names and are matched against the contents of each
:attr:`WorkerState.resources` dictionary.
.. attribute:: loose_restrictions: bool
If ``False``, each of :attr:`host_restrictions`,
:attr:`worker_restrictions` and :attr:`resource_restrictions` is
a hard constraint: if no worker is available satisfying those
restrictions, the task cannot go into the "processing" state and
will instead go into the "no-worker" state.
If ``True``, the above restrictions are mere preferences: if no worker
is available satisfying those restrictions, the task can still go
into the "processing" state and be sent for execution to another
connected worker.
.. attribute:: actor: bool
Whether or not this task is an Actor.
.. attribute:: group: TaskGroup
The group of tasks to which this one belongs.
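
A minimal illustration of inspecting this state (a sketch: ``s`` is assumed
to be a running ``Scheduler`` and ``"x"`` a key it knows about; the printed
values are hypothetical)::

    >>> ts = s.tasks["x"]                     # doctest: +SKIP
    >>> ts.state                              # doctest: +SKIP
    'memory'
    >>> {dts.key for dts in ts.dependencies}  # doctest: +SKIP
    {'y', 'z'}
    >>> [ws.address for ws in ts.who_has]     # doctest: +SKIP
    ['tcp://127.0.0.1:34567']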
"""
__slots__ = (
# === General description ===
"actor",
# Key name
"key",
# Key prefix (see key_split())
"prefix",
# How to run the task (None if pure data)
"run_spec",
# Alive dependents and dependencies
"dependencies",
"dependents",
# Compute priority
"priority",
# Restrictions
"host_restrictions",
"worker_restrictions", # not WorkerStates but addresses
"resource_restrictions",
"loose_restrictions",
# === Task state ===
"_state",
# Whether some dependencies were forgotten
"has_lost_dependencies",
# If in 'waiting' state, which tasks need to complete
# before we can run
"waiting_on",
# If in 'waiting' or 'processing' state, which tasks need us
# to complete before they can run
"waiters",
# If in 'processing' state, which worker we are processing on
"processing_on",
# If in 'memory' state, which workers have us
"who_has",
# Which clients want us
"who_wants",
"exception",
"traceback",
"exception_blame",
"suspicious",
"retries",
"nbytes",
"type",
"group_key",
"group",
)
def __init__(self, key, run_spec):
self.key = key
self.run_spec = run_spec
self._state = None
self.exception = self.traceback = self.exception_blame = None
self.suspicious = self.retries = 0
self.nbytes = None
self.priority = None
self.who_wants = set()
self.dependencies = set()
self.dependents = set()
self.waiting_on = set()
self.waiters = set()
self.who_has = set()
self.processing_on = None
self.has_lost_dependencies = False
self.host_restrictions = None
self.worker_restrictions = None
self.resource_restrictions = None
self.loose_restrictions = False
self.actor = None
self.type = None
self.group_key = key_split_group(key)
self.group = None
@property
def state(self) -> str:
return self._state
@property
def prefix_key(self):
return self.prefix.name
@state.setter
def state(self, value: str):
self.group.states[self._state] -= 1
self.group.states[value] += 1
self._state = value
def add_dependency(self, other: "TaskState"):
""" Add another task as a dependency of this task """
self.dependencies.add(other)
self.group.dependencies.add(other.group)
other.dependents.add(self)
def get_nbytes(self) -> int:
nbytes = self.nbytes
return nbytes if nbytes is not None else DEFAULT_DATA_SIZE
def set_nbytes(self, nbytes: int):
old_nbytes = self.nbytes
diff = nbytes - (old_nbytes or 0)
self.group.nbytes_total += diff
self.group.nbytes_in_memory += diff
for ws in self.who_has:
ws.nbytes += diff
self.nbytes = nbytes
def __repr__(self):
return "<Task %r %s>" % (self.key, self.state)
def validate(self):
try:
for cs in self.who_wants:
assert isinstance(cs, ClientState), (repr(cs), self.who_wants)
for ws in self.who_has:
assert isinstance(ws, WorkerState), (repr(ws), self.who_has)
for ts in self.dependencies:
assert isinstance(ts, TaskState), (repr(ts), self.dependencies)
for ts in self.dependents:
assert isinstance(ts, TaskState), (repr(ts), self.dependents)
validate_task_state(self)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
class TaskGroup:
""" Collection tracking all tasks within a group
Keys often have a structure like ``("x-123", 0)``
A group takes the first section, like ``"x-123"``
.. attribute:: name: str
The name of a group of tasks.
For a task like ``("x-123", 0)`` this is the text ``"x-123"``
.. attribute:: states: Dict[str, int]
The number of tasks in each state,
like ``{"memory": 10, "processing": 3, "released": 4, ...}``
.. attribute:: dependencies: Set[TaskGroup]
The other TaskGroups on which this one depends
.. attribute:: nbytes_total: int
The total number of bytes that this task group has produced
.. attribute:: nbytes_in_memory: int
The number of bytes currently stored by this TaskGroup
.. attribute:: duration: float
The total amount of time spent on all tasks in this TaskGroup
.. attribute:: types: Set[str]
The result types of this TaskGroup
See also
--------
TaskPrefix
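
A sketch of reading a group's counters (``s`` is assumed to be a running
``Scheduler``; the group name and counts are hypothetical)::

    >>> tg = s.task_groups["x-123"]  # doctest: +SKIP
    >>> tg.states                    # doctest: +SKIP
    {'memory': 10, 'processing': 3, 'released': 4, ...}
    >>> len(tg)                      # doctest: +SKIP
    17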
"""
def __init__(self, name):
self.name = name
self.states = {state: 0 for state in ALL_TASK_STATES}
self.states["forgotten"] = 0
self.dependencies = set()
self.nbytes_total = 0
self.nbytes_in_memory = 0
self.duration = 0
self.types = set()
def add(self, ts):
self.states[ts.state] += 1
ts.group = self
def __repr__(self):
return (
"<"
+ (self.name or "no-group")
+ ": "
+ ", ".join(
"%s: %d" % (k, v) for (k, v) in sorted(self.states.items()) if v
)
+ ">"
)
def __len__(self):
return sum(self.states.values())
class TaskPrefix:
""" Collection tracking all tasks within a group
Keys often have a structure like ``("x-123", 0)``
A group takes the first section, like ``"x"``
.. attribute:: name: str
The name of a group of tasks.
For a task like ``("x-123", 0)`` this is the text ``"x"``
.. attribute:: states: Dict[str, int]
The number of tasks in each state,
like ``{"memory": 10, "processing": 3, "released": 4, ...}``
.. attribute:: duration_average: float
An exponentially weighted moving average duration of all tasks with this prefix
.. attribute:: suspicious: int
The number of times tasks with this prefix have been marked as suspicious
See Also
--------
TaskGroup
"""
def __init__(self, name):
self.name = name
self.groups = []
# store timings for each prefix-action
self.all_durations = defaultdict(float)
task_durations = dask.config.get("distributed.scheduler.default-task-durations")
if self.name in task_durations:
    self.duration_average = parse_timedelta(task_durations[self.name])
else:
    self.duration_average = None
self.suspicious = 0
@property
def states(self):
return merge_with(sum, [g.states for g in self.groups])
@property
def active(self):
return [
g
for g in self.groups
if any(v != 0 for k, v in g.states.items() if k != "forgotten")
]
@property
def active_states(self):
return merge_with(sum, [g.states for g in self.active])
def __repr__(self):
return (
"<"
+ self.name
+ ": "
+ ", ".join(
"%s: %d" % (k, v) for (k, v) in sorted(self.states.items()) if v
)
+ ">"
)
@property
def nbytes_in_memory(self):
return sum(tg.nbytes_in_memory for tg in self.groups)
@property
def nbytes_total(self):
return sum(tg.nbytes_total for tg in self.groups)
def __len__(self):
return sum(map(len, self.groups))
@property
def duration(self):
return sum(tg.duration for tg in self.groups)
@property
def types(self):
return set().union(*[tg.types for tg in self.groups])
class _StateLegacyMapping(Mapping):
"""
A mapping interface mimicking the former Scheduler state dictionaries.
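
A sketch of the intended use (``s`` is assumed to be a running ``Scheduler``;
the values shown are hypothetical)::

    >>> s.priority["x"]      # doctest: +SKIP
    (0, 1, 5)
    >>> s.dependencies["x"]  # doctest: +SKIP
    {'y', 'z'}

Here ``s.priority`` wraps ``s.tasks`` with an attribute accessor, while
``s.dependencies`` additionally converts the ``TaskState`` set into a set of
task keys (see the wiring in ``Scheduler.__init__``).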
"""
def __init__(self, states, accessor):
self._states = states
self._accessor = accessor
def __iter__(self):
return iter(self._states)
def __len__(self):
return len(self._states)
def __getitem__(self, key):
return self._accessor(self._states[key])
def __repr__(self):
return "%s(%s)" % (self.__class__, dict(self))
class _OptionalStateLegacyMapping(_StateLegacyMapping):
"""
Similar to _StateLegacyMapping, but a falsy value is interpreted
as a missing key.
"""
# For tasks etc.
def __iter__(self):
accessor = self._accessor
for k, v in self._states.items():
if accessor(v):
yield k
def __len__(self):
accessor = self._accessor
return sum(bool(accessor(v)) for v in self._states.values())
def __getitem__(self, key):
v = self._accessor(self._states[key])
if v:
return v
else:
raise KeyError
class _StateLegacySet(Set):
"""
Similar to _StateLegacyMapping, but exposes a set containing
all values with a true value.
"""
# For loose_restrictions
def __init__(self, states, accessor):
self._states = states
self._accessor = accessor
def __iter__(self):
return (k for k, v in self._states.items() if self._accessor(v))
def __len__(self):
return sum(map(bool, map(self._accessor, self._states.values())))
def __contains__(self, k):
st = self._states.get(k)
return st is not None and bool(self._accessor(st))
def __repr__(self):
return "%s(%s)" % (self.__class__, set(self))
def _legacy_task_key_set(tasks):
"""
Transform a set of task states into a set of task keys.
"""
return {ts.key for ts in tasks}
def _legacy_client_key_set(clients):
"""
Transform a set of client states into a set of client keys.
"""
return {cs.client_key for cs in clients}
def _legacy_worker_key_set(workers):
"""
Transform a set of worker states into a set of worker keys.
"""
return {ws.address for ws in workers}
def _legacy_task_key_dict(task_dict):
"""
Transform a dict of {task state: value} into a dict of {task key: value}.
"""
return {ts.key: value for ts, value in task_dict.items()}
def _task_key_or_none(task):
return task.key if task is not None else None
class Scheduler(ServerNode):
""" Dynamic distributed task scheduler
The scheduler tracks the current state of workers, data, and computations.
The scheduler listens for events and responds by controlling workers
appropriately. It continuously tries to use the workers to execute an ever
growing dask graph.
All events are handled quickly, in linear time with respect to their input
(which is often of constant size) and generally within a millisecond. To
accomplish this the scheduler tracks a lot of state. Every operation
maintains the consistency of this state.
The scheduler communicates with the outside world through Comm objects.
It maintains a consistent and valid view of the world even when listening
to several clients at once.
A Scheduler is typically started either with the ``dask-scheduler``
executable::
$ dask-scheduler
Scheduler started at 127.0.0.1:8786
Or it is created implicitly, within a ``LocalCluster``, when a Client starts
up without connection information::
>>> c = Client() # doctest: +SKIP
>>> c.cluster.scheduler # doctest: +SKIP
Scheduler(...)
Users typically do not interact with the scheduler directly but rather with
the client object ``Client``.
**State**
The scheduler contains the following state variables. Each variable is
listed along with what it stores and a brief description.
* **tasks:** ``{task key: TaskState}``
Tasks currently known to the scheduler
* **unrunnable:** ``{TaskState}``
Tasks in the "no-worker" state
* **workers:** ``{worker key: WorkerState}``
Workers currently connected to the scheduler
* **idle:** ``{WorkerState}``:
Set of workers that are not fully utilized
* **saturated:** ``{WorkerState}``:
Set of workers that are over-utilized
* **host_info:** ``{hostname: dict}``:
Information about each worker host
* **clients:** ``{client key: ClientState}``
Clients currently connected to the scheduler
* **services:** ``{str: port}``:
Other services running on this scheduler, like Bokeh
* **loop:** ``IOLoop``:
The running Tornado IOLoop
* **client_comms:** ``{client key: Comm}``
For each client, a Comm object used to receive task requests and
report task status updates.
* **stream_comms:** ``{worker key: Comm}``
For each worker, a Comm object from which we both accept stimuli and
report results
* **task_duration:** ``{key-prefix: time}``
Time we expect certain functions to take, e.g. ``{'sum': 0.25}``
"""
default_port = 8786
_instances = weakref.WeakSet()
def __init__(
self,
loop=None,
delete_interval="500ms",
synchronize_worker_interval="60s",
services=None,
service_kwargs=None,
allowed_failures=None,
extensions=None,
validate=None,
scheduler_file=None,
security=None,
worker_ttl=None,
idle_timeout=None,
interface=None,
host=None,
port=0,
protocol=None,
dashboard_address=None,
dashboard=None,
http_prefix="/",
preload=None,
preload_argv=(),
plugins=(),
**kwargs,
):
self._setup_logging(logger)
# Attributes
if allowed_failures is None:
allowed_failures = dask.config.get("distributed.scheduler.allowed-failures")
self.allowed_failures = allowed_failures
if validate is None:
validate = dask.config.get("distributed.scheduler.validate")
self.validate = validate
self.proc = psutil.Process()
self.delete_interval = parse_timedelta(delete_interval, default="ms")
self.synchronize_worker_interval = parse_timedelta(
synchronize_worker_interval, default="ms"
)
self.digests = None
self.service_specs = services or {}
self.service_kwargs = service_kwargs or {}
self.services = {}
self.scheduler_file = scheduler_file
worker_ttl = worker_ttl or dask.config.get("distributed.scheduler.worker-ttl")
self.worker_ttl = parse_timedelta(worker_ttl) if worker_ttl else None
idle_timeout = idle_timeout or dask.config.get(
"distributed.scheduler.idle-timeout"
)
if idle_timeout:
self.idle_timeout = parse_timedelta(idle_timeout)
else:
self.idle_timeout = None
self.idle_since = time()
self._lock = asyncio.Lock()
self.bandwidth = parse_bytes(dask.config.get("distributed.scheduler.bandwidth"))
self.bandwidth_workers = defaultdict(float)
self.bandwidth_types = defaultdict(float)
if not preload:
preload = dask.config.get("distributed.scheduler.preload")
if not preload_argv:
preload_argv = dask.config.get("distributed.scheduler.preload-argv")
self.preloads = preloading.process_preloads(self, preload, preload_argv)
if isinstance(security, dict):
security = Security(**security)
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args("scheduler")
self._start_address = addresses_from_user_args(
host=host,
port=port,
interface=interface,
protocol=protocol,
security=security,
default_port=self.default_port,
)
routes = get_handlers(
server=self,
modules=dask.config.get("distributed.scheduler.http.routes"),
prefix=http_prefix,
)
self.start_http_server(routes, dashboard_address, default_port=8787)
if dashboard or (dashboard is None and dashboard_address):
try:
import distributed.dashboard.scheduler
except ImportError:
logger.debug("To start diagnostics web server please install Bokeh")
else:
distributed.dashboard.scheduler.connect(
self.http_application, self.http_server, self, prefix=http_prefix
)
# Communication state
self.loop = loop or IOLoop.current()
self.client_comms = dict()
self.stream_comms = dict()
self._worker_coroutines = []
self._ipython_kernel = None
# Task state
self.tasks = dict()
self.task_groups = dict()
self.task_prefixes = dict()
for old_attr, new_attr, wrap in [
("priority", "priority", None),
("dependencies", "dependencies", _legacy_task_key_set),
("dependents", "dependents", _legacy_task_key_set),
("retries", "retries", None),
]:
func = operator.attrgetter(new_attr)
if wrap is not None:
func = compose(wrap, func)
setattr(self, old_attr, _StateLegacyMapping(self.tasks, func))
for old_attr, new_attr, wrap in [
("nbytes", "nbytes", None),
("who_wants", "who_wants", _legacy_client_key_set),
("who_has", "who_has", _legacy_worker_key_set),
("waiting", "waiting_on", _legacy_task_key_set),
("waiting_data", "waiters", _legacy_task_key_set),
("rprocessing", "processing_on", None),
("host_restrictions", "host_restrictions", None),
("worker_restrictions", "worker_restrictions", None),
("resource_restrictions", "resource_restrictions", None),
("suspicious_tasks", "suspicious", None),
("exceptions", "exception", None),
("tracebacks", "traceback", None),
("exceptions_blame", "exception_blame", _task_key_or_none),
]:
func = operator.attrgetter(new_attr)
if wrap is not None:
func = compose(wrap, func)
setattr(self, old_attr, _OptionalStateLegacyMapping(self.tasks, func))
for old_attr, new_attr, wrap in [
("loose_restrictions", "loose_restrictions", None)
]:
func = operator.attrgetter(new_attr)
if wrap is not None:
func = compose(wrap, func)
setattr(self, old_attr, _StateLegacySet(self.tasks, func))
self.generation = 0
self._last_client = None
self._last_time = 0
self.unrunnable = set()
self.n_tasks = 0
self.task_metadata = dict()
self.datasets = dict()
# Prefix-keyed containers
self.unknown_durations = defaultdict(set)
# Client state
self.clients = dict()
for old_attr, new_attr, wrap in [
("wants_what", "wants_what", _legacy_task_key_set)
]:
func = operator.attrgetter(new_attr)
if wrap is not None:
func = compose(wrap, func)
setattr(self, old_attr, _StateLegacyMapping(self.clients, func))
self.clients["fire-and-forget"] = ClientState("fire-and-forget")
# Worker state
self.workers = sortedcontainers.SortedDict()
for old_attr, new_attr, wrap in [
("nthreads", "nthreads", None),
("worker_bytes", "nbytes", None),
("worker_resources", "resources", None),
("used_resources", "used_resources", None),
("occupancy", "occupancy", None),
("worker_info", "metrics", None),
("processing", "processing", _legacy_task_key_dict),
("has_what", "has_what", _legacy_task_key_set),
]:
func = operator.attrgetter(new_attr)
if wrap is not None:
func = compose(wrap, func)
setattr(self, old_attr, _StateLegacyMapping(self.workers, func))
self.idle = sortedcontainers.SortedSet(key=operator.attrgetter("address"))
self.saturated = set()
self.total_nthreads = 0
self.total_occupancy = 0
self.host_info = defaultdict(dict)
self.resources = defaultdict(dict)
self.aliases = dict()
self._task_state_collections = [self.unrunnable]
self._worker_collections = [
self.workers,
self.host_info,
self.resources,
self.aliases,
]
self.extensions = {}
self.plugins = list(plugins)
self.transition_log = deque(
maxlen=dask.config.get("distributed.scheduler.transition-log-length")
)
self.log = deque(
maxlen=dask.config.get("distributed.scheduler.transition-log-length")
)
self.worker_plugins = []
worker_handlers = {
"task-finished": self.handle_task_finished,
"task-erred": self.handle_task_erred,
"release": self.handle_release_data,
"release-worker-data": self.release_worker_data,
"add-keys": self.add_keys,
"missing-data": self.handle_missing_data,
"long-running": self.handle_long_running,
"reschedule": self.reschedule,
"keep-alive": lambda *args, **kwargs: None,
}
client_handlers = {
"update-graph": self.update_graph,
"client-desires-keys": self.client_desires_keys,
"update-data": self.update_data,
"report-key": self.report_on_key,
"client-releases-keys": self.client_releases_keys,
"heartbeat-client": self.client_heartbeat,
"close-client": self.remove_client,
"restart": self.restart,
}
self.handlers = {
"register-client": self.add_client,
"scatter": self.scatter,
"register-worker": self.add_worker,
"unregister": self.remove_worker,
"gather": self.gather,
"cancel": self.stimulus_cancel,
"retry": self.stimulus_retry,
"feed": self.feed,
"terminate": self.close,
"broadcast": self.broadcast,
"proxy": self.proxy,
"ncores": self.get_ncores,
"has_what": self.get_has_what,
"who_has": self.get_who_has,
"processing": self.get_processing,
"call_stack": self.get_call_stack,
"profile": self.get_profile,
"performance_report": self.performance_report,
"get_logs": self.get_logs,
"logs": self.get_logs,
"worker_logs": self.get_worker_logs,
"nbytes": self.get_nbytes,
"versions": self.versions,
"add_keys": self.add_keys,
"rebalance": self.rebalance,
"replicate": self.replicate,
"start_ipython": self.start_ipython,
"run_function": self.run_function,
"update_data": self.update_data,
"set_resources": self.add_resources,
"retire_workers": self.retire_workers,
"get_metadata": self.get_metadata,
"set_metadata": self.set_metadata,
"heartbeat_worker": self.heartbeat_worker,
"get_task_status": self.get_task_status,
"get_task_stream": self.get_task_stream,
"register_worker_plugin": self.register_worker_plugin,
"adaptive_target": self.adaptive_target,
"workers_to_close": self.workers_to_close,
"subscribe_worker_status": self.subscribe_worker_status,
}
self._transitions = {
("released", "waiting"): self.transition_released_waiting,
("waiting", "released"): self.transition_waiting_released,
("waiting", "processing"): self.transition_waiting_processing,
("waiting", "memory"): self.transition_waiting_memory,
("processing", "released"): self.transition_processing_released,
("processing", "memory"): self.transition_processing_memory,
("processing", "erred"): self.transition_processing_erred,
("no-worker", "released"): self.transition_no_worker_released,
("no-worker", "waiting"): self.transition_no_worker_waiting,
("released", "forgotten"): self.transition_released_forgotten,
("memory", "forgotten"): self.transition_memory_forgotten,
("erred", "forgotten"): self.transition_released_forgotten,
("erred", "released"): self.transition_erred_released,
("memory", "released"): self.transition_memory_released,
("released", "erred"): self.transition_released_erred,
}
connection_limit = get_fileno_limit() / 2
super(Scheduler, self).__init__(
handlers=self.handlers,
stream_handlers=merge(worker_handlers, client_handlers),
io_loop=self.loop,
connection_limit=connection_limit,
deserialize=False,
connection_args=self.connection_args,
**kwargs,
)
if self.worker_ttl:
pc = PeriodicCallback(self.check_worker_ttl, self.worker_ttl)
self.periodic_callbacks["worker-ttl"] = pc
if self.idle_timeout:
pc = PeriodicCallback(self.check_idle, self.idle_timeout / 4)
self.periodic_callbacks["idle-timeout"] = pc
if extensions is None:
extensions = list(DEFAULT_EXTENSIONS)
if dask.config.get("distributed.scheduler.work-stealing"):
extensions.append(WorkStealing)
for ext in extensions:
ext(self)
setproctitle("dask-scheduler [not started]")
Scheduler._instances.add(self)
self.rpc.allow_offload = False
self.status = Status.undefined
@property
def status(self):
return self._status
@status.setter
def status(self, new_status):
if isinstance(new_status, Status):
self._status = new_status
elif isinstance(new_status, str) or new_status is None:
corresponding_enum_variants = [s for s in Status if s.value == new_status]
assert len(corresponding_enum_variants) == 1
self._status = corresponding_enum_variants[0]
##################
# Administration #
##################
def __repr__(self):
return '<Scheduler: "%s" processes: %d cores: %d>' % (
self.address,
len(self.workers),
self.total_nthreads,
)
def identity(self, comm=None):
""" Basic information about ourselves and our cluster """
d = {
"type": type(self).__name__,
"id": str(self.id),
"address": self.address,
"services": {key: v.port for (key, v) in self.services.items()},
"workers": {
worker.address: worker.identity() for worker in self.workers.values()
},
}
return d
def get_worker_service_addr(self, worker, service_name, protocol=False):
"""
Get the (host, port) address of the named service on the *worker*.
Returns None if the service doesn't exist.
Parameters
----------
worker : address
service_name : str
Common services include 'bokeh' and 'nanny'
protocol : boolean
Whether or not to include a full address with protocol (True)
or just a (host, port) pair
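
Examples
--------
A sketch (the worker address and returned port are hypothetical)::

    >>> s.get_worker_service_addr("tcp://127.0.0.1:36000", "nanny")  # doctest: +SKIP
    ('127.0.0.1', 37000)
    >>> s.get_worker_service_addr("tcp://127.0.0.1:36000", "nanny", protocol=True)  # doctest: +SKIP
    'tcp://127.0.0.1:37000'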
"""
ws = self.workers[worker]
port = ws.services.get(service_name)
if port is None:
return None
elif protocol:
return "%(protocol)s://%(host)s:%(port)d" % {
"protocol": ws.address.split("://")[0],
"host": ws.host,
"port": port,
}
else:
return ws.host, port
async def start(self):
""" Clear out old state and restart all running coroutines """
await super().start()
assert self.status != Status.running
enable_gc_diagnosis()
self.clear_task_state()
with suppress(AttributeError):
for c in self._worker_coroutines:
c.cancel()
for addr in self._start_address:
await self.listen(
addr, allow_offload=False, **self.security.get_listen_args("scheduler")
)
self.ip = get_address_host(self.listen_address)
listen_ip = self.ip
if listen_ip == "0.0.0.0":
listen_ip = ""
if self.address.startswith("inproc://"):
listen_ip = "localhost"
# Services listen on all addresses
self.start_services(listen_ip)
for listener in self.listeners:
logger.info(" Scheduler at: %25s", listener.contact_address)
for k, v in self.services.items():
logger.info("%11s at: %25s", k, "%s:%d" % (listen_ip, v.port))
self.loop.add_callback(self.reevaluate_occupancy)
if self.scheduler_file:
with open(self.scheduler_file, "w") as f:
json.dump(self.identity(), f, indent=2)
fn = self.scheduler_file # remove file when we close the process
def del_scheduler_file():
if os.path.exists(fn):
os.remove(fn)
weakref.finalize(self, del_scheduler_file)
for preload in self.preloads:
await preload.start()
await asyncio.gather(*[plugin.start(self) for plugin in self.plugins])
self.start_periodic_callbacks()
setproctitle("dask-scheduler [%s]" % (self.address,))
return self
async def close(self, comm=None, fast=False, close_workers=False):
""" Send cleanup signal to all coroutines then wait until finished
See Also
--------
Scheduler.cleanup
"""
if self.status in (Status.closing, Status.closed, Status.closing_gracefully):
await self.finished()
return
self.status = Status.closing
logger.info("Scheduler closing...")
setproctitle("dask-scheduler [closing]")
for preload in self.preloads:
await preload.teardown()
if close_workers:
await self.broadcast(msg={"op": "close_gracefully"}, nanny=True)
for worker in self.workers:
self.worker_send(worker, {"op": "close"})
for i in range(20): # wait a second for send signals to clear
if self.workers:
await asyncio.sleep(0.05)
else:
break
await asyncio.gather(*[plugin.close() for plugin in self.plugins])
for pc in self.periodic_callbacks.values():
pc.stop()
self.periodic_callbacks.clear()
self.stop_services()
for ext in self.extensions.values():
with suppress(AttributeError):
ext.teardown()
logger.info("Scheduler closing all comms")
futures = []
for w, comm in list(self.stream_comms.items()):
if not comm.closed():
comm.send({"op": "close", "report": False})
comm.send({"op": "close-stream"})
with suppress(AttributeError):
futures.append(comm.close())
for future in futures: # TODO: do all at once
await future
for comm in self.client_comms.values():
comm.abort()
await self.rpc.close()
self.status = Status.closed
self.stop()
await super(Scheduler, self).close()
setproctitle("dask-scheduler [closed]")
disable_gc_diagnosis()
async def close_worker(self, comm=None, worker=None, safe=None):
""" Remove a worker from the cluster
This both removes the worker from our local state and also sends a
signal to the worker to shut down. This works regardless of whether or
not the worker has a nanny process restarting it
"""
logger.info("Closing worker %s", worker)
with log_errors():
self.log_event(worker, {"action": "close-worker"})
nanny_addr = self.workers[worker].nanny
address = nanny_addr or worker
self.worker_send(worker, {"op": "close", "report": False})
await self.remove_worker(address=worker, safe=safe)
###########
# Stimuli #
###########
def heartbeat_worker(
self,
comm=None,
address=None,
resolve_address=True,
now=None,
resources=None,
host_info=None,
metrics=None,
):
address = self.coerce_address(address, resolve_address)
address = normalize_address(address)
if address not in self.workers:
return {"status": "missing"}
host = get_address_host(address)
local_now = time()
now = now or time()
assert metrics
host_info = host_info or {}
self.host_info[host]["last-seen"] = local_now
frac = 1 / len(self.workers)
self.bandwidth = (
self.bandwidth * (1 - frac) + metrics["bandwidth"]["total"] * frac
)
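# Illustrative arithmetic (not executed): with 4 workers, frac = 0.25, so a
# previous scheduler-wide estimate of 100 MB/s and a reported total of
# 140 MB/s combine to 100 * 0.75 + 140 * 0.25 = 110 MB/s.  The per-worker
# and per-type estimates below use the same exponentially weighted update,
# where the weight kept on the old estimate is (1 - frac) ** count.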
for other, (bw, count) in metrics["bandwidth"]["workers"].items():
if (address, other) not in self.bandwidth_workers:
self.bandwidth_workers[address, other] = bw / count
else:
alpha = (1 - frac) ** count
self.bandwidth_workers[address, other] = self.bandwidth_workers[
address, other
] * alpha + bw * (1 - alpha)
for typ, (bw, count) in metrics["bandwidth"]["types"].items():
if typ not in self.bandwidth_types:
self.bandwidth_types[typ] = bw / count
else:
alpha = (1 - frac) ** count
self.bandwidth_types[typ] = self.bandwidth_types[typ] * alpha + bw * (
1 - alpha
)
ws = self.workers[address]
ws.last_seen = time()
if metrics:
ws.metrics = metrics
if host_info:
self.host_info[host].update(host_info)
delay = time() - now
ws.time_delay = delay
if resources:
self.add_resources(worker=address, resources=resources)
self.log_event(address, merge({"action": "heartbeat"}, metrics))
return {
"status": "OK",
"time": time(),
"heartbeat-interval": heartbeat_interval(len(self.workers)),
}
async def add_worker(
self,
comm=None,
address=None,
keys=(),
nthreads=None,
name=None,
resolve_address=True,
nbytes=None,
types=None,
now=None,
resources=None,
host_info=None,
memory_limit=None,
metrics=None,
pid=0,
services=None,
local_directory=None,
versions=None,
nanny=None,
extra=None,
):
""" Add a new worker to the cluster """
with log_errors():
address = self.coerce_address(address, resolve_address)
address = normalize_address(address)
host = get_address_host(address)
ws = self.workers.get(address)
if ws is not None:
raise ValueError("Worker already exists %s" % ws)
if name in self.aliases:
msg = {
"status": "error",
"message": "name taken, %s" % name,
"time": time(),
}
if comm:
await comm.write(msg)
return
self.workers[address] = ws = WorkerState(
address=address,
pid=pid,
nthreads=nthreads,
memory_limit=memory_limit,
name=name,
local_directory=local_directory,
services=services,
versions=versions,
nanny=nanny,
extra=extra,
)
if "addresses" not in self.host_info[host]:
self.host_info[host].update({"addresses": set(), "nthreads": 0})
self.host_info[host]["addresses"].add(address)
self.host_info[host]["nthreads"] += nthreads
self.total_nthreads += nthreads
self.aliases[name] = address
response = self.heartbeat_worker(
address=address,
resolve_address=resolve_address,
now=now,
resources=resources,
host_info=host_info,
metrics=metrics,
)
# Do not need to adjust self.total_occupancy as self.occupancy[ws] cannot exist before this.
self.check_idle_saturated(ws)
# for key in keys: # TODO
# self.mark_key_in_memory(key, [address])
self.stream_comms[address] = BatchedSend(interval="5ms", loop=self.loop)
if ws.nthreads > len(ws.processing):
self.idle.add(ws)
for plugin in self.plugins[:]:
try:
result = plugin.add_worker(scheduler=self, worker=address)
if inspect.isawaitable(result):
await result
except Exception as e:
logger.exception(e)
if nbytes:
for key in nbytes:
ts = self.tasks.get(key)
if ts is not None and ts.state in ("processing", "waiting"):
recommendations = self.transition(
key,
"memory",
worker=address,
nbytes=nbytes[key],
typename=types[key],
)
self.transitions(recommendations)
recommendations = {}
for ts in list(self.unrunnable):
valid = self.valid_workers(ts)
if valid is True or ws in valid:
recommendations[ts.key] = "waiting"
if recommendations:
self.transitions(recommendations)
self.log_event(address, {"action": "add-worker"})
self.log_event("all", {"action": "add-worker", "worker": address})
logger.info("Register worker %s", ws)
msg = {
"status": "OK",
"time": time(),
"heartbeat-interval": heartbeat_interval(len(self.workers)),
"worker-plugins": self.worker_plugins,
}
version_warning = version_module.error_message(
version_module.get_versions(),
merge(
{w: ws.versions for w, ws in self.workers.items()},
{c: cs.versions for c, cs in self.clients.items() if cs.versions},
),
versions,
client_name="This Worker",
)
if version_warning:
msg["warning"] = version_warning
if comm:
await comm.write(msg)
await self.handle_worker(comm=comm, worker=address)
def update_graph(
self,
client=None,
tasks=None,
keys=None,
dependencies=None,
restrictions=None,
priority=None,
loose_restrictions=None,
resources=None,
submitting_task=None,
retries=None,
user_priority=0,
actors=None,
fifo_timeout=0,
):
"""
Add new computations to the internal dask graph
This happens whenever the Client calls submit, map, get, or compute.
"""
start = time()
fifo_timeout = parse_timedelta(fifo_timeout)
keys = set(keys)
if len(tasks) > 1:
self.log_event(
["all", client], {"action": "update_graph", "count": len(tasks)}
)
# Remove aliases
for k in list(tasks):
if tasks[k] is k:
del tasks[k]
dependencies = dependencies or {}
n = 0
while len(tasks) != n: # walk through new tasks, cancel any bad deps
n = len(tasks)
for k, deps in list(dependencies.items()):
if any(
dep not in self.tasks and dep not in tasks for dep in deps
): # bad key
logger.info("User asked for computation on lost data, %s", k)
del tasks[k]
del dependencies[k]
if k in keys:
keys.remove(k)
self.report({"op": "cancelled-key", "key": k}, client=client)
self.client_releases_keys(keys=[k], client=client)
# Remove any self-dependencies (happens on test_publish_bag() and others)
for k, v in dependencies.items():
deps = set(v)
if k in deps:
deps.remove(k)
dependencies[k] = deps
# Avoid computation that is already finished
already_in_memory = set() # tasks that are already done
for k, v in dependencies.items():
if v and k in self.tasks and self.tasks[k].state in ("memory", "erred"):
already_in_memory.add(k)
if already_in_memory:
dependents = dask.core.reverse_dict(dependencies)
stack = list(already_in_memory)
done = set(already_in_memory)
while stack: # remove unnecessary dependencies
key = stack.pop()
ts = self.tasks[key]
try:
deps = dependencies[key]
except KeyError:
deps = self.dependencies[key]
for dep in deps:
if dep in dependents:
child_deps = dependents[dep]
else:
child_deps = self.dependencies[dep]
if all(d in done for d in child_deps):
if dep in self.tasks and dep not in done:
done.add(dep)
stack.append(dep)
for d in done:
tasks.pop(d, None)
dependencies.pop(d, None)
# Get or create task states
stack = list(keys)
touched_keys = set()
touched_tasks = []
while stack:
k = stack.pop()
if k in touched_keys:
continue
# XXX Have a method get_task_state(self, k) ?
ts = self.tasks.get(k)
if ts is None:
ts = self.new_task(k, tasks.get(k), "released")
elif not ts.run_spec:
ts.run_spec = tasks.get(k)
touched_keys.add(k)
touched_tasks.append(ts)
stack.extend(dependencies.get(k, ()))
self.client_desires_keys(keys=keys, client=client)
# Add dependencies
for key, deps in dependencies.items():
ts = self.tasks.get(key)
if ts is None or ts.dependencies:
continue
for dep in deps:
dts = self.tasks[dep]
ts.add_dependency(dts)
# Compute priorities
if isinstance(user_priority, Number):
user_priority = {k: user_priority for k in tasks}
# Add actors
if actors is True:
actors = list(keys)
for actor in actors or []:
self.tasks[actor].actor = True
priority = priority or dask.order.order(
tasks
) # TODO: define order wrt old graph
if submitting_task: # sub-tasks get better priority than parent tasks
ts = self.tasks.get(submitting_task)
if ts is not None:
generation = ts.priority[0] - 0.01
else: # super-task already cleaned up
generation = self.generation
elif self._last_time + fifo_timeout < start:
self.generation += 1 # older graph generations take precedence
generation = self.generation
self._last_time = start
else:
generation = self.generation
for key in set(priority) & touched_keys:
ts = self.tasks[key]
if ts.priority is None:
ts.priority = (-(user_priority.get(key, 0)), generation, priority[key])
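# Illustrative example (not executed): a key submitted with user_priority=10
# during scheduler generation 3 and placed at position 7 by dask.order ends
# up with ts.priority == (-10, 3, 7).  Priorities compare lexicographically,
# so the (negated) user priority dominates, then the submission generation,
# then the within-graph ordering.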
# Ensure all runnables have a priority
runnables = [ts for ts in touched_tasks if ts.run_spec]
for ts in runnables:
if ts.priority is None and ts.run_spec:
ts.priority = (self.generation, 0)
if restrictions:
# *restrictions* is a dict keying task ids to lists of
# restriction specifications (either worker names or addresses)
for k, v in restrictions.items():
if v is None:
continue
ts = self.tasks.get(k)
if ts is None:
continue
ts.host_restrictions = set()
ts.worker_restrictions = set()
for w in v:
try:
w = self.coerce_address(w)
except ValueError:
# Not a valid address, but perhaps it's a hostname
ts.host_restrictions.add(w)
else:
ts.worker_restrictions.add(w)
if loose_restrictions:
for k in loose_restrictions:
ts = self.tasks[k]
ts.loose_restrictions = True
if resources:
for k, v in resources.items():
if v is None:
continue
assert isinstance(v, dict)
ts = self.tasks.get(k)
if ts is None:
continue
ts.resource_restrictions = v
if retries:
for k, v in retries.items():
assert isinstance(v, int)
ts = self.tasks.get(k)
if ts is None:
continue
ts.retries = v
# Compute recommendations
recommendations = {}
for ts in sorted(runnables, key=operator.attrgetter("priority"), reverse=True):
if ts.state == "released" and ts.run_spec:
recommendations[ts.key] = "waiting"
for ts in touched_tasks:
for dts in ts.dependencies:
if dts.exception_blame:
ts.exception_blame = dts.exception_blame
recommendations[ts.key] = "erred"
break
for plugin in self.plugins[:]:
try:
plugin.update_graph(
self,
client=client,
tasks=tasks,
keys=keys,
restrictions=restrictions or {},
dependencies=dependencies,
priority=priority,
loose_restrictions=loose_restrictions,
resources=resources,
)
except Exception as e:
logger.exception(e)
self.transitions(recommendations)
for ts in touched_tasks:
if ts.state in ("memory", "erred"):
self.report_on_key(ts.key, client=client)
end = time()
if self.digests is not None:
self.digests["update-graph-duration"].add(end - start)
# TODO: balance workers
def new_task(self, key, spec, state):
""" Create a new task, and associated states """
ts = TaskState(key, spec)
ts._state = state
prefix_key = key_split(key)
try:
tp = self.task_prefixes[prefix_key]
except KeyError:
tp = self.task_prefixes[prefix_key] = TaskPrefix(prefix_key)
ts.prefix = tp
group_key = ts.group_key
try:
tg = self.task_groups[group_key]
except KeyError:
tg = self.task_groups[group_key] = TaskGroup(group_key)
tg.prefix = tp
tp.groups.append(tg)
tg.add(ts)
self.tasks[key] = ts
return ts
def stimulus_task_finished(self, key=None, worker=None, **kwargs):
""" Mark that a task has finished execution on a particular worker """
logger.debug("Stimulus task finished %s, %s", key, worker)
ts = self.tasks.get(key)
if ts is None:
return {}
ws = self.workers[worker]
if ts.state == "processing":
recommendations = self.transition(key, "memory", worker=worker, **kwargs)
if ts.state == "memory":
assert ws in ts.who_has
else:
logger.debug(
"Received already computed task, worker: %s, state: %s"
", key: %s, who_has: %s",
worker,
ts.state,
key,
ts.who_has,
)
if ws not in ts.who_has:
self.worker_send(worker, {"op": "release-task", "key": key})
recommendations = {}
return recommendations
def stimulus_task_erred(
self, key=None, worker=None, exception=None, traceback=None, **kwargs
):
""" Mark that a task has erred on a particular worker """
logger.debug("Stimulus task erred %s, %s", key, worker)
ts = self.tasks.get(key)
if ts is None:
return {}
if ts.state == "processing":
retries = ts.retries
if retries > 0:
ts.retries = retries - 1
recommendations = self.transition(key, "waiting")
else:
recommendations = self.transition(
key,
"erred",
cause=key,
exception=exception,
traceback=traceback,
worker=worker,
**kwargs,
)
else:
recommendations = {}
return recommendations
def stimulus_missing_data(
self, cause=None, key=None, worker=None, ensure=True, **kwargs
):
""" Mark that certain keys have gone missing. Recover. """
with log_errors():
logger.debug("Stimulus missing data %s, %s", key, worker)
ts = self.tasks.get(key)
if ts is None or ts.state == "memory":
return {}
cts = self.tasks.get(cause)
recommendations = {}
if cts is not None and cts.state == "memory":  # the worker could not find data we believe to be in memory
for ws in cts.who_has: # TODO: this behavior is extreme
ws.has_what.remove(cts)
ws.nbytes -= cts.get_nbytes()
cts.who_has.clear()
recommendations[cause] = "released"
if key:
recommendations[key] = "released"
self.transitions(recommendations)
if self.validate:
assert cause not in self.who_has
return {}
def stimulus_retry(self, comm=None, keys=None, client=None):
logger.info("Client %s requests to retry %d keys", client, len(keys))
if client:
self.log_event(client, {"action": "retry", "count": len(keys)})
stack = list(keys)
seen = set()
roots = []
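# Illustrative walk (not executed): retrying key "z", whose dependency "y"
# erred because its own dependency "x" erred, traverses z -> y -> x and
# re-submits only the deepest erred dependency "x" as a root, rather than
# "z" itself.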
while stack:
key = stack.pop()
seen.add(key)
erred_deps = [
dts.key for dts in self.tasks[key].dependencies if dts.state == "erred"
]
if erred_deps:
stack.extend(erred_deps)
else:
roots.append(key)
recommendations = {key: "waiting" for key in roots}
self.transitions(recommendations)
if self.validate:
for key in seen:
assert not self.tasks[key].exception_blame
return tuple(seen)
async def remove_worker(self, comm=None, address=None, safe=False, close=True):
"""
Remove worker from cluster
We do this when a worker reports that it plans to leave or when it
appears to be unresponsive. This may send its tasks back to a released
state.
"""
with log_errors():
if self.status == Status.closed:
return
address = self.coerce_address(address)
if address not in self.workers:
return "already-removed"
host = get_address_host(address)
ws = self.workers[address]
self.log_event(
["all", address],
{
"action": "remove-worker",
"worker": address,
"processing-tasks": dict(ws.processing),
},
)
logger.info("Remove worker %s", ws)
if close:
with suppress(AttributeError, CommClosedError):
self.stream_comms[address].send({"op": "close", "report": False})
self.remove_resources(address)
self.host_info[host]["nthreads"] -= ws.nthreads
self.host_info[host]["addresses"].remove(address)
self.total_nthreads -= ws.nthreads
if not self.host_info[host]["addresses"]:
del self.host_info[host]
self.rpc.remove(address)
del self.stream_comms[address]
del self.aliases[ws.name]
self.idle.discard(ws)
self.saturated.discard(ws)
del self.workers[address]
ws.status = "closed"
self.total_occupancy -= ws.occupancy
recommendations = {}
for ts in list(ws.processing):
k = ts.key
recommendations[k] = "released"
if not safe:
ts.suspicious += 1
ts.prefix.suspicious += 1
if ts.suspicious > self.allowed_failures:
del recommendations[k]
e = pickle.dumps(
KilledWorker(task=k, last_worker=ws.clean()), -1
)
r = self.transition(k, "erred", exception=e, cause=k)
recommendations.update(r)
for ts in ws.has_what:
ts.who_has.remove(ws)
if not ts.who_has:
if ts.run_spec:
recommendations[ts.key] = "released"
else: # pure data
recommendations[ts.key] = "forgotten"
ws.has_what.clear()
self.transitions(recommendations)
for plugin in self.plugins[:]:
try:
result = plugin.remove_worker(scheduler=self, worker=address)
if inspect.isawaitable(result):
await result
except Exception as e:
logger.exception(e)
if not self.workers:
logger.info("Lost all workers")
for w in self.workers:
self.bandwidth_workers.pop((address, w), None)
self.bandwidth_workers.pop((w, address), None)
def remove_worker_from_events():
# If the worker isn't registered anymore after the delay, remove from events
if address not in self.workers and address in self.events:
del self.events[address]
cleanup_delay = parse_timedelta(
dask.config.get("distributed.scheduler.events-cleanup-delay")
)
self.loop.call_later(cleanup_delay, remove_worker_from_events)
logger.debug("Removed worker %s", ws)
return "OK"
def stimulus_cancel(self, comm, keys=None, client=None, force=False):
""" Stop execution on a list of keys """
logger.info("Client %s requests to cancel %d keys", client, len(keys))
if client:
self.log_event(
client, {"action": "cancel", "count": len(keys), "force": force}
)
for key in keys:
self.cancel_key(key, client, force=force)
def cancel_key(self, key, client, retries=5, force=False):
""" Cancel a particular key and all dependents """
# TODO: this should be converted to use the transition mechanism
ts = self.tasks.get(key)
try:
cs = self.clients[client]
except KeyError:
return
if ts is None or not ts.who_wants:  # no key yet, let's try again in a moment
if retries:
self.loop.call_later(
0.2, lambda: self.cancel_key(key, client, retries - 1)
)
return
if force or ts.who_wants == {cs}: # no one else wants this key
for dts in list(ts.dependents):
self.cancel_key(dts.key, client, force=force)
logger.info("Scheduler cancels key %s. Force=%s", key, force)
self.report({"op": "cancelled-key", "key": key})
clients = list(ts.who_wants) if force else [cs]
for c in clients:
self.client_releases_keys(keys=[key], client=c.client_key)
def client_desires_keys(self, keys=None, client=None):
cs = self.clients.get(client)
if cs is None:
# For publish, queues etc.
cs = self.clients[client] = ClientState(client)
for k in keys:
ts = self.tasks.get(k)
if ts is None:
# For publish, queues etc.
ts = self.new_task(k, None, "released")
ts.who_wants.add(cs)
cs.wants_what.add(ts)
if ts.state in ("memory", "erred"):
self.report_on_key(k, client=client)
def client_releases_keys(self, keys=None, client=None):
""" Remove keys from client desired list """
logger.debug("Client %s releases keys: %s", client, keys)
cs = self.clients[client]
tasks2 = set()
for key in list(keys):
ts = self.tasks.get(key)
if ts is not None and ts in cs.wants_what:
cs.wants_what.remove(ts)
s = ts.who_wants
s.remove(cs)
if not s:
tasks2.add(ts)
recommendations = {}
for ts in tasks2:
if not ts.dependents:
# No live dependents, can forget
recommendations[ts.key] = "forgotten"
elif ts.state != "erred" and not ts.waiters:
recommendations[ts.key] = "released"
self.transitions(recommendations)
def client_heartbeat(self, client=None):
""" Handle heartbeats from Client """
self.clients[client].last_seen = time()
###################
# Task Validation #
###################
def validate_released(self, key):
ts = self.tasks[key]
assert ts.state == "released"
assert not ts.waiters
assert not ts.waiting_on
assert not ts.who_has
assert not ts.processing_on
assert not any(ts in dts.waiters for dts in ts.dependencies)
assert ts not in self.unrunnable
def validate_waiting(self, key):
ts = self.tasks[key]
assert ts.waiting_on
assert not ts.who_has
assert not ts.processing_on
assert ts not in self.unrunnable
for dts in ts.dependencies:
# We are waiting on a dependency iff it's not stored
assert bool(dts.who_has) + (dts in ts.waiting_on) == 1
assert ts in dts.waiters # XXX even if dts.who_has?
def validate_processing(self, key):
ts = self.tasks[key]
assert not ts.waiting_on
ws = ts.processing_on
assert ws
assert ts in ws.processing
assert not ts.who_has
for dts in ts.dependencies:
assert dts.who_has
assert ts in dts.waiters
def validate_memory(self, key):
ts = self.tasks[key]
assert ts.who_has
assert not ts.processing_on
assert not ts.waiting_on
assert ts not in self.unrunnable
for dts in ts.dependents:
assert (dts in ts.waiters) == (dts.state in ("waiting", "processing"))
assert ts not in dts.waiting_on
def validate_no_worker(self, key):
ts = self.tasks[key]
assert ts in self.unrunnable
assert not ts.waiting_on
assert not ts.processing_on
assert not ts.who_has
for dts in ts.dependencies:
assert dts.who_has
def validate_erred(self, key):
ts = self.tasks[key]
assert ts.exception_blame
assert not ts.who_has
def validate_key(self, key, ts=None):
try:
if ts is None:
ts = self.tasks.get(key)
if ts is None:
logger.debug("Key lost: %s", key)
else:
ts.validate()
try:
func = getattr(self, "validate_" + ts.state.replace("-", "_"))
except AttributeError:
logger.error(
"self.validate_%s not found", ts.state.replace("-", "_")
)
else:
func(key)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def validate_state(self, allow_overlap=False):
validate_state(self.tasks, self.workers, self.clients)
if not (set(self.workers) == set(self.stream_comms)):
raise ValueError("Workers not the same in all collections")
for w, ws in self.workers.items():
assert isinstance(w, str), (type(w), w)
assert isinstance(ws, WorkerState), (type(ws), ws)
assert ws.address == w
if not ws.processing:
assert not ws.occupancy
assert ws in self.idle
for k, ts in self.tasks.items():
assert isinstance(ts, TaskState), (type(ts), ts)
assert ts.key == k
self.validate_key(k, ts)
for c, cs in self.clients.items():
# client=None is often used in tests...
assert c is None or isinstance(c, str), (type(c), c)
assert isinstance(cs, ClientState), (type(cs), cs)
assert cs.client_key == c
a = {w: ws.nbytes for w, ws in self.workers.items()}
b = {
w: sum(ts.get_nbytes() for ts in ws.has_what)
for w, ws in self.workers.items()
}
assert a == b, (a, b)
actual_total_occupancy = 0
for worker, ws in self.workers.items():
assert abs(sum(ws.processing.values()) - ws.occupancy) < 1e-8
actual_total_occupancy += ws.occupancy
assert abs(actual_total_occupancy - self.total_occupancy) < 1e-8, (
actual_total_occupancy,
self.total_occupancy,
)
###################
# Manage Messages #
###################
def report(self, msg, ts=None, client=None):
"""
Publish updates to all listening Queues and Comms
If the message contains a key then we only send the message to those
comms that care about the key.
"""
comms = set()
if client is not None:
try:
comms.add(self.client_comms[client])
except KeyError:
pass
if ts is None and "key" in msg:
ts = self.tasks.get(msg["key"])
if ts is None:
# Notify all clients
comms |= set(self.client_comms.values())
else:
# Notify clients interested in key
comms |= {
self.client_comms[c.client_key]
for c in ts.who_wants
if c.client_key in self.client_comms
}
for c in comms:
try:
c.send(msg)
# logger.debug("Scheduler sends message to client %s", msg)
except CommClosedError:
if self.status == Status.running:
logger.critical("Tried writing to closed comm: %s", msg)
async def add_client(self, comm, client=None, versions=None):
""" Add client to network
We listen to all future messages from this Comm.
"""
assert client is not None
comm.name = "Scheduler->Client"
logger.info("Receive client connection: %s", client)
self.log_event(["all", client], {"action": "add-client", "client": client})
self.clients[client] = ClientState(client, versions=versions)
for plugin in self.plugins[:]:
try:
plugin.add_client(scheduler=self, client=client)
except Exception as e:
logger.exception(e)
try:
bcomm = BatchedSend(interval="2ms", loop=self.loop)
bcomm.start(comm)
self.client_comms[client] = bcomm
msg = {"op": "stream-start"}
version_warning = version_module.error_message(
version_module.get_versions(),
{w: ws.versions for w, ws in self.workers.items()},
versions,
)
if version_warning:
msg["warning"] = version_warning
bcomm.send(msg)
try:
await self.handle_stream(comm=comm, extra={"client": client})
finally:
self.remove_client(client=client)
logger.debug("Finished handling client %s", client)
finally:
if not comm.closed():
self.client_comms[client].send({"op": "stream-closed"})
try:
if not shutting_down():
await self.client_comms[client].close()
del self.client_comms[client]
if self.status == Status.running:
logger.info("Close client connection: %s", client)
except TypeError: # comm becomes None during GC
pass
def remove_client(self, client=None):
""" Remove client from network """
if self.status == Status.running:
logger.info("Remove client %s", client)
self.log_event(["all", client], {"action": "remove-client", "client": client})
try:
cs = self.clients[client]
except KeyError:
# XXX is this a legitimate condition?
pass
else:
self.client_releases_keys(
keys=[ts.key for ts in cs.wants_what], client=cs.client_key
)
del self.clients[client]
for plugin in self.plugins[:]:
try:
plugin.remove_client(scheduler=self, client=client)
except Exception as e:
logger.exception(e)
def remove_client_from_events():
# If the client isn't registered anymore after the delay, remove from events
if client not in self.clients and client in self.events:
del self.events[client]
cleanup_delay = parse_timedelta(
dask.config.get("distributed.scheduler.events-cleanup-delay")
)
self.loop.call_later(cleanup_delay, remove_client_from_events)
def send_task_to_worker(self, worker, key):
""" Send a single computational task to a worker """
try:
ts = self.tasks[key]
msg = {
"op": "compute-task",
"key": key,
"priority": ts.priority,
"duration": self.get_task_duration(ts),
}
if ts.resource_restrictions:
msg["resource_restrictions"] = ts.resource_restrictions
if ts.actor:
msg["actor"] = True
deps = ts.dependencies
if deps:
msg["who_has"] = {
dep.key: [ws.address for ws in dep.who_has] for dep in deps
}
msg["nbytes"] = {dep.key: dep.nbytes for dep in deps}
if self.validate and deps:
assert all(msg["who_has"].values())
task = ts.run_spec
if type(task) is dict:
msg.update(task)
else:
msg["task"] = task
self.worker_send(worker, msg)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def handle_uncaught_error(self, **msg):
logger.exception(clean_exception(**msg)[1])
def handle_task_finished(self, key=None, worker=None, **msg):
if worker not in self.workers:
return
validate_key(key)
r = self.stimulus_task_finished(key=key, worker=worker, **msg)
self.transitions(r)
def handle_task_erred(self, key=None, **msg):
r = self.stimulus_task_erred(key=key, **msg)
self.transitions(r)
def handle_release_data(self, key=None, worker=None, client=None, **msg):
ts = self.tasks.get(key)
if ts is None:
return
ws = self.workers[worker]
if ts.processing_on != ws:
return
r = self.stimulus_missing_data(key=key, ensure=False, **msg)
self.transitions(r)
def handle_missing_data(self, key=None, errant_worker=None, **kwargs):
logger.debug("handle missing data key=%s worker=%s", key, errant_worker)
self.log.append(("missing", key, errant_worker))
ts = self.tasks.get(key)
if ts is None or not ts.who_has:
return
if errant_worker in self.workers:
ws = self.workers[errant_worker]
if ws in ts.who_has:
ts.who_has.remove(ws)
ws.has_what.remove(ts)
ws.nbytes -= ts.get_nbytes()
if not ts.who_has:
if ts.run_spec:
self.transitions({key: "released"})
else:
self.transitions({key: "forgotten"})
def release_worker_data(self, comm=None, keys=None, worker=None):
ws = self.workers[worker]
tasks = {self.tasks[k] for k in keys}
removed_tasks = tasks & ws.has_what
ws.has_what -= removed_tasks
recommendations = {}
for ts in removed_tasks:
ws.nbytes -= ts.get_nbytes()
wh = ts.who_has
wh.remove(ws)
if not wh:
recommendations[ts.key] = "released"
if recommendations:
self.transitions(recommendations)
def handle_long_running(self, key=None, worker=None, compute_duration=None):
""" A task has seceded from the thread pool
We stop the task from being stolen in the future, and change task
duration accounting as if the task has stopped.
"""
ts = self.tasks[key]
if "stealing" in self.extensions:
self.extensions["stealing"].remove_key_from_stealable(ts)
ws = ts.processing_on
if ws is None:
logger.debug("Received long-running signal from duplicate task. Ignoring.")
return
if compute_duration:
old_duration = ts.prefix.duration_average or 0
new_duration = compute_duration
if not old_duration:
avg_duration = new_duration
else:
avg_duration = 0.5 * old_duration + 0.5 * new_duration
ts.prefix.duration_average = avg_duration
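# Illustrative arithmetic (not executed): if the prefix previously averaged
# 2.0 s and this long-running task reports compute_duration=6.0 s, the new
# estimate is 0.5 * 2.0 + 0.5 * 6.0 = 4.0 s.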
ws.occupancy -= ws.processing[ts]
self.total_occupancy -= ws.processing[ts]
ws.processing[ts] = 0
self.check_idle_saturated(ws)
async def handle_worker(self, comm=None, worker=None):
"""
Listen to responses from a single worker
This is the main loop for scheduler-worker interaction
See Also
--------
Scheduler.handle_client: Equivalent coroutine for clients
"""
comm.name = "Scheduler connection to worker"
worker_comm = self.stream_comms[worker]
worker_comm.start(comm)
logger.info("Starting worker compute stream, %s", worker)
try:
await self.handle_stream(comm=comm, extra={"worker": worker})
finally:
if worker in self.stream_comms:
worker_comm.abort()
await self.remove_worker(address=worker)
def add_plugin(self, plugin=None, idempotent=False, **kwargs):
"""
Add external plugin to scheduler
See https://distributed.readthedocs.io/en/latest/plugins.html
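        Examples
        --------
        Hedged sketch; ``MyPlugin`` is a hypothetical ``SchedulerPlugin``
        subclass, not something defined in this module:

        >>> scheduler.add_plugin(MyPlugin, idempotent=True)  # doctest: +SKIP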
"""
if isinstance(plugin, type):
plugin = plugin(self, **kwargs)
if idempotent and any(isinstance(p, type(plugin)) for p in self.plugins):
return
self.plugins.append(plugin)
def remove_plugin(self, plugin):
""" Remove external plugin from scheduler """
self.plugins.remove(plugin)
def worker_send(self, worker, msg):
""" Send message to worker
This also handles connection failures by adding a callback to remove
the worker on the next cycle.
"""
try:
self.stream_comms[worker].send(msg)
except (CommClosedError, AttributeError):
self.loop.add_callback(self.remove_worker, address=worker)
############################
# Less common interactions #
############################
async def scatter(
self,
comm=None,
data=None,
workers=None,
client=None,
broadcast=False,
timeout=2,
):
""" Send data out to workers
See also
--------
Scheduler.broadcast:
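        Examples
        --------
        Hedged usage sketch (assumes a running scheduler ``s`` with at least
        one connected worker; illustrative only):

        >>> keys = await s.scatter(data={"x": 1, "y": 2}, client="client-1")  # doctest: +SKIP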
"""
start = time()
while not self.workers:
await asyncio.sleep(0.2)
if time() > start + timeout:
raise TimeoutError("No workers found")
if workers is None:
nthreads = {w: ws.nthreads for w, ws in self.workers.items()}
else:
workers = [self.coerce_address(w) for w in workers]
nthreads = {w: self.workers[w].nthreads for w in workers}
assert isinstance(data, dict)
keys, who_has, nbytes = await scatter_to_workers(
nthreads, data, rpc=self.rpc, report=False
)
self.update_data(who_has=who_has, nbytes=nbytes, client=client)
if broadcast:
if broadcast == True: # noqa: E712
n = len(nthreads)
else:
n = broadcast
await self.replicate(keys=keys, workers=workers, n=n)
self.log_event(
[client, "all"], {"action": "scatter", "client": client, "count": len(data)}
)
return keys
async def gather(self, comm=None, keys=None, serializers=None):
""" Collect data in from workers """
keys = list(keys)
who_has = {}
for key in keys:
ts = self.tasks.get(key)
if ts is not None:
who_has[key] = [ws.address for ws in ts.who_has]
else:
who_has[key] = []
data, missing_keys, missing_workers = await gather_from_workers(
who_has, rpc=self.rpc, close=False, serializers=serializers
)
if not missing_keys:
result = {"status": "OK", "data": data}
else:
missing_states = [
(self.tasks[key].state if key in self.tasks else None)
for key in missing_keys
]
logger.exception(
"Couldn't gather keys %s state: %s workers: %s",
missing_keys,
missing_states,
missing_workers,
)
result = {"status": "error", "keys": missing_keys}
with log_errors():
# Remove suspicious workers from the scheduler but allow them to
# reconnect.
await asyncio.gather(
*[
self.remove_worker(address=worker, close=False)
for worker in missing_workers
]
)
for key, workers in missing_keys.items():
# Task may already be gone if it was held by a
# `missing_worker`
ts = self.tasks.get(key)
logger.exception(
"Workers don't have promised key: %s, %s",
str(workers),
str(key),
)
if not workers or ts is None:
continue
for worker in workers:
ws = self.workers.get(worker)
if ws is not None and ts in ws.has_what:
ws.has_what.remove(ts)
ts.who_has.remove(ws)
ws.nbytes -= ts.get_nbytes()
self.transitions({key: "released"})
self.log_event("all", {"action": "gather", "count": len(keys)})
return result
def clear_task_state(self):
# XXX what about nested state such as ClientState.wants_what
# (see also fire-and-forget...)
logger.info("Clear task state")
for collection in self._task_state_collections:
collection.clear()
async def restart(self, client=None, timeout=3):
""" Restart all workers. Reset local state. """
with log_errors():
n_workers = len(self.workers)
logger.info("Send lost future signal to clients")
for cs in self.clients.values():
self.client_releases_keys(
keys=[ts.key for ts in cs.wants_what], client=cs.client_key
)
nannies = {addr: ws.nanny for addr, ws in self.workers.items()}
for addr in list(self.workers):
try:
# Ask the worker to close if it doesn't have a nanny,
# otherwise the nanny will kill it anyway
await self.remove_worker(address=addr, close=addr not in nannies)
except Exception as e:
logger.info(
"Exception while restarting. This is normal", exc_info=True
)
self.clear_task_state()
for plugin in self.plugins[:]:
try:
plugin.restart(self)
except Exception as e:
logger.exception(e)
logger.debug("Send kill signal to nannies: %s", nannies)
nannies = [
rpc(nanny_address, connection_args=self.connection_args)
for nanny_address in nannies.values()
if nanny_address is not None
]
resps = All(
[
nanny.restart(
close=True, timeout=timeout * 0.8, executor_wait=False
)
for nanny in nannies
]
)
try:
resps = await asyncio.wait_for(resps, timeout)
except TimeoutError:
logger.error(
"Nannies didn't report back restarted within "
"timeout. Continuuing with restart process"
)
else:
if not all(resp == "OK" for resp in resps):
logger.error(
"Not all workers responded positively: %s", resps, exc_info=True
)
finally:
await asyncio.gather(*[nanny.close_rpc() for nanny in nannies])
self.clear_task_state()
with suppress(AttributeError):
for c in self._worker_coroutines:
c.cancel()
self.log_event([client, "all"], {"action": "restart", "client": client})
start = time()
while time() < start + 10 and len(self.workers) < n_workers:
await asyncio.sleep(0.01)
self.report({"op": "restart"})
async def broadcast(
self,
comm=None,
msg=None,
workers=None,
hosts=None,
nanny=False,
serializers=None,
):
""" Broadcast message to workers, return all results """
if workers is None or workers is True:
if hosts is None:
workers = list(self.workers)
else:
workers = []
if hosts is not None:
for host in hosts:
if host in self.host_info:
workers.extend(self.host_info[host]["addresses"])
# TODO replace with worker_list
if nanny:
addresses = [self.workers[w].nanny for w in workers]
else:
addresses = workers
async def send_message(addr):
comm = await self.rpc.connect(addr)
comm.name = "Scheduler Broadcast"
try:
resp = await send_recv(comm, close=True, serializers=serializers, **msg)
finally:
self.rpc.reuse(addr, comm)
return resp
results = await All(
[send_message(address) for address in addresses if address is not None]
)
return dict(zip(workers, results))
async def proxy(self, comm=None, msg=None, worker=None, serializers=None):
""" Proxy a communication through the scheduler to some other worker """
d = await self.broadcast(
comm=comm, msg=msg, workers=[worker], serializers=serializers
)
return d[worker]
async def _delete_worker_data(self, worker_address, keys):
""" Delete data from a worker and update the corresponding worker/task states
Parameters
----------
worker_address: str
Worker address to delete keys from
keys: List[str]
List of keys to delete on the specified worker
"""
await retry_operation(
self.rpc(addr=worker_address).delete_data, keys=list(keys), report=False
)
ws = self.workers[worker_address]
tasks = {self.tasks[key] for key in keys}
ws.has_what -= tasks
for ts in tasks:
ts.who_has.remove(ws)
ws.nbytes -= ts.get_nbytes()
self.log_event(ws.address, {"action": "remove-worker-data", "keys": keys})
async def rebalance(self, comm=None, keys=None, workers=None):
""" Rebalance keys so that each worker stores roughly equal bytes
**Policy**
This orders the workers by what fraction of bytes of the existing keys
they have. It walks down this list from most-to-least. At each worker
        it takes the largest results it can find and sends them to the least
        occupied worker until either the sender or the recipient is at the
average expected load.
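        A minimal sketch of the balancing arithmetic (plain numbers,
        illustrative only, not actual scheduler state):

        >>> worker_bytes = {"a": 6e9, "b": 3e9, "c": 0.0}
        >>> avg = sum(worker_bytes.values()) / len(worker_bytes)
        >>> avg == 3e9  # keys move from "a" to "c" until both sit near avg
        True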
"""
with log_errors():
async with self._lock:
if keys:
tasks = {self.tasks[k] for k in keys}
missing_data = [ts.key for ts in tasks if not ts.who_has]
if missing_data:
return {"status": "missing-data", "keys": missing_data}
else:
tasks = set(self.tasks.values())
if workers:
workers = {self.workers[w] for w in workers}
workers_by_task = {ts: ts.who_has & workers for ts in tasks}
else:
workers = set(self.workers.values())
workers_by_task = {ts: ts.who_has for ts in tasks}
tasks_by_worker = {ws: set() for ws in workers}
for k, v in workers_by_task.items():
for vv in v:
tasks_by_worker[vv].add(k)
worker_bytes = {
ws: sum(ts.get_nbytes() for ts in v)
for ws, v in tasks_by_worker.items()
}
avg = sum(worker_bytes.values()) / len(worker_bytes)
sorted_workers = list(
map(first, sorted(worker_bytes.items(), key=second, reverse=True))
)
recipients = iter(reversed(sorted_workers))
recipient = next(recipients)
msgs = [] # (sender, recipient, key)
for sender in sorted_workers[: len(workers) // 2]:
sender_keys = {
ts: ts.get_nbytes() for ts in tasks_by_worker[sender]
}
sender_keys = iter(
sorted(sender_keys.items(), key=second, reverse=True)
)
try:
while worker_bytes[sender] > avg:
while (
worker_bytes[recipient] < avg
and worker_bytes[sender] > avg
):
ts, nb = next(sender_keys)
if ts not in tasks_by_worker[recipient]:
tasks_by_worker[recipient].add(ts)
# tasks_by_worker[sender].remove(ts)
msgs.append((sender, recipient, ts))
worker_bytes[sender] -= nb
worker_bytes[recipient] += nb
if worker_bytes[sender] > avg:
recipient = next(recipients)
except StopIteration:
break
to_recipients = defaultdict(lambda: defaultdict(list))
to_senders = defaultdict(list)
for sender, recipient, ts in msgs:
to_recipients[recipient.address][ts.key].append(sender.address)
to_senders[sender.address].append(ts.key)
result = await asyncio.gather(
*(
retry_operation(self.rpc(addr=r).gather, who_has=v)
for r, v in to_recipients.items()
)
)
for r, v in to_recipients.items():
self.log_event(r, {"action": "rebalance", "who_has": v})
self.log_event(
"all",
{
"action": "rebalance",
"total-keys": len(tasks),
"senders": valmap(len, to_senders),
"recipients": valmap(len, to_recipients),
"moved_keys": len(msgs),
},
)
if not all(r["status"] == "OK" for r in result):
return {
"status": "missing-data",
"keys": tuple(
concat(
r["keys"].keys()
for r in result
if r["status"] == "missing-data"
)
),
}
for sender, recipient, ts in msgs:
assert ts.state == "memory"
ts.who_has.add(recipient)
recipient.has_what.add(ts)
recipient.nbytes += ts.get_nbytes()
self.log.append(
("rebalance", ts.key, time(), sender.address, recipient.address)
)
await asyncio.gather(
*(self._delete_worker_data(r, v) for r, v in to_senders.items())
)
return {"status": "OK"}
async def replicate(
self,
comm=None,
keys=None,
n=None,
workers=None,
branching_factor=2,
delete=True,
lock=True,
):
""" Replicate data throughout cluster
This performs a tree copy of the data throughout the network
individually on each piece of data.
Parameters
----------
keys: Iterable
list of keys to replicate
n: int
Number of replications we expect to see within the cluster
branching_factor: int, optional
The number of workers that can copy data in each generation.
The larger the branching factor, the more data we copy in
a single step, but the more a given worker risks being
swamped by data requests.
See also
--------
Scheduler.rebalance
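        Examples
        --------
        Hedged usage sketch (assumes a running scheduler ``s`` that already
        holds keys ``"x"`` and ``"y"`` in memory; illustrative only):

        >>> await s.replicate(keys=["x", "y"], n=2)  # doctest: +SKIP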
"""
assert branching_factor > 0
async with self._lock if lock else empty_context:
workers = {self.workers[w] for w in self.workers_list(workers)}
if n is None:
n = len(workers)
else:
n = min(n, len(workers))
if n == 0:
raise ValueError("Can not use replicate to delete data")
tasks = {self.tasks[k] for k in keys}
missing_data = [ts.key for ts in tasks if not ts.who_has]
if missing_data:
return {"status": "missing-data", "keys": missing_data}
# Delete extraneous data
if delete:
del_worker_tasks = defaultdict(set)
for ts in tasks:
del_candidates = ts.who_has & workers
if len(del_candidates) > n:
for ws in random.sample(
del_candidates, len(del_candidates) - n
):
del_worker_tasks[ws].add(ts)
await asyncio.gather(
*(
self._delete_worker_data(ws.address, [t.key for t in tasks])
for ws, tasks in del_worker_tasks.items()
)
)
# Copy not-yet-filled data
while tasks:
gathers = defaultdict(dict)
for ts in list(tasks):
n_missing = n - len(ts.who_has & workers)
if n_missing <= 0:
# Already replicated enough
tasks.remove(ts)
continue
count = min(n_missing, branching_factor * len(ts.who_has))
assert count > 0
for ws in random.sample(workers - ts.who_has, count):
gathers[ws.address][ts.key] = [
wws.address for wws in ts.who_has
]
results = await asyncio.gather(
*(
retry_operation(self.rpc(addr=w).gather, who_has=who_has)
for w, who_has in gathers.items()
)
)
for w, v in zip(gathers, results):
if v["status"] == "OK":
self.add_keys(worker=w, keys=list(gathers[w]))
else:
logger.warning("Communication failed during replication: %s", v)
self.log_event(w, {"action": "replicate-add", "keys": gathers[w]})
self.log_event(
"all",
{
"action": "replicate",
"workers": list(workers),
"key-count": len(keys),
"branching-factor": branching_factor,
},
)
def workers_to_close(
self,
comm=None,
memory_ratio=None,
n=None,
key=None,
minimum=None,
target=None,
attribute="address",
):
"""
Find workers that we can close with low cost
This returns a list of workers that are good candidates to retire.
These workers are not running anything and are storing
        relatively little data compared to their peers. If all workers are
idle then we still maintain enough workers to have enough RAM to store
our data, with a comfortable buffer.
This is for use with systems like ``distributed.deploy.adaptive``.
Parameters
----------
        memory_ratio: Number
            Amount of extra space we want to have for our stored data.
            Defaults to 2, meaning that we want to have twice as much memory as
            we currently have data.
n: int
Number of workers to close
minimum: int
Minimum number of workers to keep around
key: Callable(WorkerState)
An optional callable mapping a WorkerState object to a group
affiliation. Groups will be closed together. This is useful when
closing workers must be done collectively, such as by hostname.
target: int
Target number of workers to have after we close
attribute : str
The attribute of the WorkerState object to return, like "address"
or "name". Defaults to "address".
Examples
--------
>>> scheduler.workers_to_close()
['tcp://192.168.0.1:1234', 'tcp://192.168.0.2:1234']
Group workers by hostname prior to closing
>>> scheduler.workers_to_close(key=lambda ws: ws.host)
['tcp://192.168.0.1:1234', 'tcp://192.168.0.1:4567']
Remove two workers
>>> scheduler.workers_to_close(n=2)
        Keep enough workers to have twice as much memory as we need.
>>> scheduler.workers_to_close(memory_ratio=2)
Returns
-------
to_close: list of worker addresses that are OK to close
See Also
--------
Scheduler.retire_workers
"""
if target is not None and n is None:
n = len(self.workers) - target
if n is not None:
if n < 0:
n = 0
target = len(self.workers) - n
if n is None and memory_ratio is None:
memory_ratio = 2
with log_errors():
if not n and all(ws.processing for ws in self.workers.values()):
return []
if key is None:
key = lambda ws: ws.address
if isinstance(key, bytes) and dask.config.get(
"distributed.scheduler.pickle"
):
key = pickle.loads(key)
groups = groupby(key, self.workers.values())
limit_bytes = {
k: sum(ws.memory_limit for ws in v) for k, v in groups.items()
}
group_bytes = {k: sum(ws.nbytes for ws in v) for k, v in groups.items()}
limit = sum(limit_bytes.values())
total = sum(group_bytes.values())
def _key(group):
is_idle = not any(ws.processing for ws in groups[group])
bytes = -group_bytes[group]
return (is_idle, bytes)
idle = sorted(groups, key=_key)
to_close = []
n_remain = len(self.workers)
while idle:
group = idle.pop()
if n is None and any(ws.processing for ws in groups[group]):
break
if minimum and n_remain - len(groups[group]) < minimum:
break
limit -= limit_bytes[group]
if (n is not None and n_remain - len(groups[group]) >= target) or (
memory_ratio is not None and limit >= memory_ratio * total
):
to_close.append(group)
n_remain -= len(groups[group])
else:
break
result = [getattr(ws, attribute) for g in to_close for ws in groups[g]]
if result:
logger.debug("Suggest closing workers: %s", result)
return result
async def retire_workers(
self,
comm=None,
workers=None,
remove=True,
close_workers=False,
names=None,
lock=True,
**kwargs,
):
""" Gracefully retire workers from cluster
Parameters
----------
workers: list (optional)
List of worker addresses to retire.
            If not provided we call ``workers_to_close`` which finds a good set.
        names: list (optional)
List of worker names to retire.
remove: bool (defaults to True)
Whether or not to remove the worker metadata immediately or else
wait for the worker to contact us
close_workers: bool (defaults to False)
Whether or not to actually close the worker explicitly from here.
Otherwise we expect some external job scheduler to finish off the
worker.
**kwargs: dict
Extra options to pass to workers_to_close to determine which
workers we should drop
Returns
-------
Dictionary mapping worker ID/address to dictionary of information about
that worker for each retired worker.
See Also
--------
Scheduler.workers_to_close
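        Examples
        --------
        Hedged usage sketch (assumes a running scheduler ``s``; the address is
        hypothetical):

        >>> await s.retire_workers(workers=["tcp://127.0.0.1:1234"])  # doctest: +SKIP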
"""
with log_errors():
async with self._lock if lock else empty_context:
if names is not None:
if names:
logger.info("Retire worker names %s", names)
names = set(map(str, names))
workers = [
ws.address
for ws in self.workers.values()
if str(ws.name) in names
]
if workers is None:
while True:
try:
workers = self.workers_to_close(**kwargs)
if workers:
workers = await self.retire_workers(
workers=workers,
remove=remove,
close_workers=close_workers,
lock=False,
)
return workers
except KeyError: # keys left during replicate
pass
workers = {self.workers[w] for w in workers if w in self.workers}
if not workers:
return []
logger.info("Retire workers %s", workers)
# Keys orphaned by retiring those workers
keys = set.union(*[w.has_what for w in workers])
keys = {ts.key for ts in keys if ts.who_has.issubset(workers)}
other_workers = set(self.workers.values()) - workers
if keys:
if other_workers:
logger.info("Moving %d keys to other workers", len(keys))
await self.replicate(
keys=keys,
workers=[ws.address for ws in other_workers],
n=1,
delete=False,
lock=False,
)
else:
return []
worker_keys = {ws.address: ws.identity() for ws in workers}
if close_workers and worker_keys:
await asyncio.gather(
*[self.close_worker(worker=w, safe=True) for w in worker_keys]
)
if remove:
await asyncio.gather(
*[self.remove_worker(address=w, safe=True) for w in worker_keys]
)
self.log_event(
"all",
{
"action": "retire-workers",
"workers": worker_keys,
"moved-keys": len(keys),
},
)
self.log_event(list(worker_keys), {"action": "retired"})
return worker_keys
def add_keys(self, comm=None, worker=None, keys=()):
"""
Learn that a worker has certain keys
This should not be used in practice and is mostly here for legacy
reasons. However, it is sent by workers from time to time.
"""
if worker not in self.workers:
return "not found"
ws = self.workers[worker]
for key in keys:
ts = self.tasks.get(key)
if ts is not None and ts.state == "memory":
if ts not in ws.has_what:
ws.nbytes += ts.get_nbytes()
ws.has_what.add(ts)
ts.who_has.add(ws)
else:
self.worker_send(
worker, {"op": "delete-data", "keys": [key], "report": False}
)
return "OK"
def update_data(
self, comm=None, who_has=None, nbytes=None, client=None, serializers=None
):
"""
Learn that new data has entered the network from an external source
See Also
--------
Scheduler.mark_key_in_memory
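        Examples
        --------
        Hedged usage sketch (the address, size and client name are
        hypothetical):

        >>> s.update_data(                          # doctest: +SKIP
        ...     who_has={"x": ["tcp://127.0.0.1:1234"]},
        ...     nbytes={"x": 100},
        ...     client="client-1",
        ... )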
"""
with log_errors():
who_has = {
k: [self.coerce_address(vv) for vv in v] for k, v in who_has.items()
}
logger.debug("Update data %s", who_has)
for key, workers in who_has.items():
ts = self.tasks.get(key)
if ts is None:
ts = self.new_task(key, None, "memory")
ts.state = "memory"
if key in nbytes:
ts.set_nbytes(nbytes[key])
for w in workers:
ws = self.workers[w]
if ts not in ws.has_what:
ws.nbytes += ts.get_nbytes()
ws.has_what.add(ts)
ts.who_has.add(ws)
self.report(
{"op": "key-in-memory", "key": key, "workers": list(workers)}
)
if client:
self.client_desires_keys(keys=list(who_has), client=client)
def report_on_key(self, key=None, ts=None, client=None):
assert (key is None) + (ts is None) == 1, (key, ts)
if ts is None:
try:
ts = self.tasks[key]
except KeyError:
self.report({"op": "cancelled-key", "key": key}, client=client)
return
else:
key = ts.key
if ts.state == "forgotten":
self.report({"op": "cancelled-key", "key": key}, ts=ts, client=client)
elif ts.state == "memory":
self.report({"op": "key-in-memory", "key": key}, ts=ts, client=client)
elif ts.state == "erred":
failing_ts = ts.exception_blame
self.report(
{
"op": "task-erred",
"key": key,
"exception": failing_ts.exception,
"traceback": failing_ts.traceback,
},
ts=ts,
client=client,
)
async def feed(
self, comm, function=None, setup=None, teardown=None, interval="1s", **kwargs
):
"""
Provides a data Comm to external requester
Caution: this runs arbitrary Python code on the scheduler. This should
eventually be phased out. It is mostly used by diagnostics.
"""
if not dask.config.get("distributed.scheduler.pickle"):
            logger.warning(
                "Tried to call 'feed' route with custom functions, but "
                "pickle is disallowed. Set the 'distributed.scheduler.pickle' "
                "config value to True to use the 'feed' route (this is most "
                "commonly used with progress bars)"
)
return
interval = parse_timedelta(interval)
with log_errors():
if function:
function = pickle.loads(function)
if setup:
setup = pickle.loads(setup)
if teardown:
teardown = pickle.loads(teardown)
state = setup(self) if setup else None
if inspect.isawaitable(state):
state = await state
try:
while self.status == Status.running:
if state is None:
response = function(self)
else:
response = function(self, state)
await comm.write(response)
await asyncio.sleep(interval)
except (EnvironmentError, CommClosedError):
pass
finally:
if teardown:
teardown(self, state)
def subscribe_worker_status(self, comm=None):
WorkerStatusPlugin(self, comm)
ident = self.identity()
for v in ident["workers"].values():
del v["metrics"]
del v["last_seen"]
return ident
def get_processing(self, comm=None, workers=None):
if workers is not None:
workers = set(map(self.coerce_address, workers))
return {w: [ts.key for ts in self.workers[w].processing] for w in workers}
else:
return {
w: [ts.key for ts in ws.processing] for w, ws in self.workers.items()
}
def get_who_has(self, comm=None, keys=None):
if keys is not None:
return {
k: [ws.address for ws in self.tasks[k].who_has]
if k in self.tasks
else []
for k in keys
}
else:
return {
key: [ws.address for ws in ts.who_has] for key, ts in self.tasks.items()
}
def get_has_what(self, comm=None, workers=None):
if workers is not None:
workers = map(self.coerce_address, workers)
return {
w: [ts.key for ts in self.workers[w].has_what]
if w in self.workers
else []
for w in workers
}
else:
return {w: [ts.key for ts in ws.has_what] for w, ws in self.workers.items()}
def get_ncores(self, comm=None, workers=None):
if workers is not None:
workers = map(self.coerce_address, workers)
return {w: self.workers[w].nthreads for w in workers if w in self.workers}
else:
return {w: ws.nthreads for w, ws in self.workers.items()}
async def get_call_stack(self, comm=None, keys=None):
if keys is not None:
stack = list(keys)
processing = set()
while stack:
key = stack.pop()
ts = self.tasks[key]
if ts.state == "waiting":
stack.extend(dts.key for dts in ts.dependencies)
elif ts.state == "processing":
processing.add(ts)
workers = defaultdict(list)
for ts in processing:
if ts.processing_on:
workers[ts.processing_on.address].append(ts.key)
else:
workers = {w: None for w in self.workers}
if not workers:
return {}
results = await asyncio.gather(
*(self.rpc(w).call_stack(keys=v) for w, v in workers.items())
)
response = {w: r for w, r in zip(workers, results) if r}
return response
def get_nbytes(self, comm=None, keys=None, summary=True):
with log_errors():
if keys is not None:
result = {k: self.tasks[k].nbytes for k in keys}
else:
result = {
k: ts.nbytes
for k, ts in self.tasks.items()
if ts.nbytes is not None
}
if summary:
out = defaultdict(lambda: 0)
for k, v in result.items():
out[key_split(k)] += v
result = dict(out)
return result
def get_comm_cost(self, ts, ws):
"""
        Get the estimated communication cost (in seconds) to compute the task
on the given worker.
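        A minimal numeric sketch (illustrative values only): bytes of
        dependencies missing from the worker divided by the measured bandwidth.

        >>> missing_dep_bytes, bandwidth = 50e6, 100e6
        >>> missing_dep_bytes / bandwidth  # estimated seconds of transfer
        0.5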
"""
return sum(dts.nbytes for dts in ts.dependencies - ws.has_what) / self.bandwidth
def get_task_duration(self, ts, default=None):
"""
Get the estimated computation cost of the given task
(not including any communication cost).
"""
duration = ts.prefix.duration_average
if duration is None:
self.unknown_durations[ts.prefix.name].add(ts)
if default is None:
default = parse_timedelta(
dask.config.get("distributed.scheduler.unknown-task-duration")
)
return default
return duration
def run_function(self, stream, function, args=(), kwargs={}, wait=True):
""" Run a function within this process
See Also
--------
Client.run_on_scheduler:
"""
from .worker import run
self.log_event("all", {"action": "run-function", "function": function})
return run(self, stream, function=function, args=args, kwargs=kwargs, wait=wait)
def set_metadata(self, comm=None, keys=None, value=None):
try:
metadata = self.task_metadata
for key in keys[:-1]:
if key not in metadata or not isinstance(metadata[key], (dict, list)):
metadata[key] = dict()
metadata = metadata[key]
metadata[keys[-1]] = value
except Exception as e:
import pdb
pdb.set_trace()
def get_metadata(self, comm=None, keys=None, default=no_default):
metadata = self.task_metadata
for key in keys[:-1]:
metadata = metadata[key]
try:
return metadata[keys[-1]]
except KeyError:
if default != no_default:
return default
else:
raise
def get_task_status(self, comm=None, keys=None):
return {
key: (self.tasks[key].state if key in self.tasks else None) for key in keys
}
def get_task_stream(self, comm=None, start=None, stop=None, count=None):
from distributed.diagnostics.task_stream import TaskStreamPlugin
self.add_plugin(TaskStreamPlugin, idempotent=True)
ts = [p for p in self.plugins if isinstance(p, TaskStreamPlugin)][0]
return ts.collect(start=start, stop=stop, count=count)
async def register_worker_plugin(self, comm, plugin, name=None):
""" Registers a setup function, and call it on every worker """
self.worker_plugins.append(plugin)
responses = await self.broadcast(
msg=dict(op="plugin-add", plugin=plugin, name=name)
)
return responses
#####################
# State Transitions #
#####################
def _remove_from_processing(self, ts, send_worker_msg=None):
"""
Remove *ts* from the set of processing tasks.
"""
ws = ts.processing_on
ts.processing_on = None
w = ws.address
if w in self.workers: # may have been removed
duration = ws.processing.pop(ts)
if not ws.processing:
self.total_occupancy -= ws.occupancy
ws.occupancy = 0
else:
self.total_occupancy -= duration
ws.occupancy -= duration
self.check_idle_saturated(ws)
self.release_resources(ts, ws)
if send_worker_msg:
self.worker_send(w, send_worker_msg)
def _add_to_memory(
self, ts, ws, recommendations, type=None, typename=None, **kwargs
):
"""
Add *ts* to the set of in-memory tasks.
"""
if self.validate:
assert ts not in ws.has_what
ts.who_has.add(ws)
ws.has_what.add(ts)
ws.nbytes += ts.get_nbytes()
deps = ts.dependents
if len(deps) > 1:
deps = sorted(deps, key=operator.attrgetter("priority"), reverse=True)
for dts in deps:
s = dts.waiting_on
if ts in s:
s.discard(ts)
if not s: # new task ready to run
recommendations[dts.key] = "processing"
for dts in ts.dependencies:
s = dts.waiters
s.discard(ts)
if not s and not dts.who_wants:
recommendations[dts.key] = "released"
if not ts.waiters and not ts.who_wants:
recommendations[ts.key] = "released"
else:
msg = {"op": "key-in-memory", "key": ts.key}
if type is not None:
msg["type"] = type
self.report(msg)
ts.state = "memory"
ts.type = typename
ts.group.types.add(typename)
cs = self.clients["fire-and-forget"]
if ts in cs.wants_what:
self.client_releases_keys(client="fire-and-forget", keys=[ts.key])
def transition_released_waiting(self, key):
try:
ts = self.tasks[key]
if self.validate:
assert ts.run_spec
assert not ts.waiting_on
assert not ts.who_has
assert not ts.processing_on
assert not any(dts.state == "forgotten" for dts in ts.dependencies)
if ts.has_lost_dependencies:
return {key: "forgotten"}
ts.state = "waiting"
recommendations = {}
for dts in ts.dependencies:
if dts.exception_blame:
ts.exception_blame = dts.exception_blame
recommendations[key] = "erred"
return recommendations
for dts in ts.dependencies:
dep = dts.key
if not dts.who_has:
ts.waiting_on.add(dts)
if dts.state == "released":
recommendations[dep] = "waiting"
else:
dts.waiters.add(ts)
ts.waiters = {dts for dts in ts.dependents if dts.state == "waiting"}
if not ts.waiting_on:
if self.workers:
recommendations[key] = "processing"
else:
self.unrunnable.add(ts)
ts.state = "no-worker"
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_no_worker_waiting(self, key):
try:
ts = self.tasks[key]
if self.validate:
assert ts in self.unrunnable
assert not ts.waiting_on
assert not ts.who_has
assert not ts.processing_on
self.unrunnable.remove(ts)
if ts.has_lost_dependencies:
return {key: "forgotten"}
recommendations = {}
for dts in ts.dependencies:
dep = dts.key
if not dts.who_has:
ts.waiting_on.add(dts)
if dts.state == "released":
recommendations[dep] = "waiting"
else:
dts.waiters.add(ts)
ts.state = "waiting"
if not ts.waiting_on:
if self.workers:
recommendations[key] = "processing"
else:
self.unrunnable.add(ts)
ts.state = "no-worker"
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def decide_worker(self, ts):
"""
Decide on a worker for task *ts*. Return a WorkerState.
"""
valid_workers = self.valid_workers(ts)
if not valid_workers and not ts.loose_restrictions and self.workers:
self.unrunnable.add(ts)
ts.state = "no-worker"
return None
if ts.dependencies or valid_workers is not True:
worker = decide_worker(
ts,
self.workers.values(),
valid_workers,
partial(self.worker_objective, ts),
)
elif self.idle:
if len(self.idle) < 20: # smart but linear in small case
worker = min(self.idle, key=operator.attrgetter("occupancy"))
else: # dumb but fast in large case
worker = self.idle[self.n_tasks % len(self.idle)]
else:
if len(self.workers) < 20: # smart but linear in small case
worker = min(
self.workers.values(), key=operator.attrgetter("occupancy")
)
else: # dumb but fast in large case
worker = self.workers.values()[self.n_tasks % len(self.workers)]
if self.validate:
assert worker is None or isinstance(worker, WorkerState), (
type(worker),
worker,
)
assert worker.address in self.workers
return worker
def transition_waiting_processing(self, key):
try:
ts = self.tasks[key]
if self.validate:
assert not ts.waiting_on
assert not ts.who_has
assert not ts.exception_blame
assert not ts.processing_on
assert not ts.has_lost_dependencies
assert ts not in self.unrunnable
assert all(dts.who_has for dts in ts.dependencies)
ws = self.decide_worker(ts)
if ws is None:
return {}
worker = ws.address
duration = self.get_task_duration(ts)
comm = self.get_comm_cost(ts, ws)
ws.processing[ts] = duration + comm
ts.processing_on = ws
ws.occupancy += duration + comm
self.total_occupancy += duration + comm
ts.state = "processing"
self.consume_resources(ts, ws)
self.check_idle_saturated(ws)
self.n_tasks += 1
if ts.actor:
ws.actors.add(ts)
# logger.debug("Send job to worker: %s, %s", worker, key)
self.send_task_to_worker(worker, key)
return {}
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_waiting_memory(self, key, nbytes=None, worker=None, **kwargs):
try:
ws = self.workers[worker]
ts = self.tasks[key]
if self.validate:
assert not ts.processing_on
assert ts.waiting_on
assert ts.state == "waiting"
ts.waiting_on.clear()
if nbytes is not None:
ts.set_nbytes(nbytes)
self.check_idle_saturated(ws)
recommendations = {}
self._add_to_memory(ts, ws, recommendations, **kwargs)
if self.validate:
assert not ts.processing_on
assert not ts.waiting_on
assert ts.who_has
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_processing_memory(
self,
key,
nbytes=None,
type=None,
typename=None,
worker=None,
startstops=None,
**kwargs,
):
try:
ts = self.tasks[key]
assert worker
assert isinstance(worker, str)
if self.validate:
assert ts.processing_on
ws = ts.processing_on
assert ts in ws.processing
assert not ts.waiting_on
assert not ts.who_has, (ts, ts.who_has)
assert not ts.exception_blame
assert ts.state == "processing"
ws = self.workers.get(worker)
if ws is None:
return {key: "released"}
if ws != ts.processing_on: # someone else has this task
logger.info(
"Unexpected worker completed task, likely due to"
" work stealing. Expected: %s, Got: %s, Key: %s",
ts.processing_on,
ws,
key,
)
return {}
if startstops:
L = list()
for startstop in startstops:
stop = startstop["stop"]
start = startstop["start"]
action = startstop["action"]
if action == "compute":
L.append((start, stop))
# record timings of all actions -- a cheaper way of
# getting timing info compared with get_task_stream()
ts.prefix.all_durations[action] += stop - start
if len(L) > 0:
compute_start, compute_stop = L[0]
else: # This is very rare
compute_start = compute_stop = None
else:
compute_start = compute_stop = None
#############################
# Update Timing Information #
#############################
if compute_start and ws.processing.get(ts, True):
# Update average task duration for worker
old_duration = ts.prefix.duration_average or 0
new_duration = compute_stop - compute_start
if not old_duration:
avg_duration = new_duration
else:
avg_duration = 0.5 * old_duration + 0.5 * new_duration
ts.prefix.duration_average = avg_duration
ts.group.duration += new_duration
for tts in self.unknown_durations.pop(ts.prefix.name, ()):
if tts.processing_on:
wws = tts.processing_on
old = wws.processing[tts]
comm = self.get_comm_cost(tts, wws)
wws.processing[tts] = avg_duration + comm
wws.occupancy += avg_duration + comm - old
self.total_occupancy += avg_duration + comm - old
############################
# Update State Information #
############################
if nbytes is not None:
ts.set_nbytes(nbytes)
recommendations = {}
self._remove_from_processing(ts)
self._add_to_memory(ts, ws, recommendations, type=type, typename=typename)
if self.validate:
assert not ts.processing_on
assert not ts.waiting_on
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_memory_released(self, key, safe=False):
try:
ts = self.tasks[key]
if self.validate:
assert not ts.waiting_on
assert not ts.processing_on
if safe:
assert not ts.waiters
if ts.actor:
for ws in ts.who_has:
ws.actors.discard(ts)
if ts.who_wants:
ts.exception_blame = ts
ts.exception = "Worker holding Actor was lost"
return {ts.key: "erred"} # don't try to recreate
recommendations = {}
for dts in ts.waiters:
if dts.state in ("no-worker", "processing"):
recommendations[dts.key] = "waiting"
elif dts.state == "waiting":
dts.waiting_on.add(ts)
# XXX factor this out?
for ws in ts.who_has:
ws.has_what.remove(ts)
ws.nbytes -= ts.get_nbytes()
ts.group.nbytes_in_memory -= ts.get_nbytes()
self.worker_send(
ws.address, {"op": "delete-data", "keys": [key], "report": False}
)
ts.who_has.clear()
ts.state = "released"
self.report({"op": "lost-data", "key": key})
if not ts.run_spec: # pure data
recommendations[key] = "forgotten"
elif ts.has_lost_dependencies:
recommendations[key] = "forgotten"
elif ts.who_wants or ts.waiters:
recommendations[key] = "waiting"
if self.validate:
assert not ts.waiting_on
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_released_erred(self, key):
try:
ts = self.tasks[key]
if self.validate:
with log_errors(pdb=LOG_PDB):
assert ts.exception_blame
assert not ts.who_has
assert not ts.waiting_on
assert not ts.waiters
recommendations = {}
failing_ts = ts.exception_blame
for dts in ts.dependents:
dts.exception_blame = failing_ts
if not dts.who_has:
recommendations[dts.key] = "erred"
self.report(
{
"op": "task-erred",
"key": key,
"exception": failing_ts.exception,
"traceback": failing_ts.traceback,
}
)
ts.state = "erred"
# TODO: waiting data?
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_erred_released(self, key):
try:
ts = self.tasks[key]
if self.validate:
with log_errors(pdb=LOG_PDB):
assert all(dts.state != "erred" for dts in ts.dependencies)
assert ts.exception_blame
assert not ts.who_has
assert not ts.waiting_on
assert not ts.waiters
recommendations = {}
ts.exception = None
ts.exception_blame = None
ts.traceback = None
for dep in ts.dependents:
if dep.state == "erred":
recommendations[dep.key] = "waiting"
self.report({"op": "task-retried", "key": key})
ts.state = "released"
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_waiting_released(self, key):
try:
ts = self.tasks[key]
if self.validate:
assert not ts.who_has
assert not ts.processing_on
recommendations = {}
for dts in ts.dependencies:
s = dts.waiters
if ts in s:
s.discard(ts)
if not s and not dts.who_wants:
recommendations[dts.key] = "released"
ts.waiting_on.clear()
ts.state = "released"
if ts.has_lost_dependencies:
recommendations[key] = "forgotten"
elif not ts.exception_blame and (ts.who_wants or ts.waiters):
recommendations[key] = "waiting"
else:
ts.waiters.clear()
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_processing_released(self, key):
try:
ts = self.tasks[key]
if self.validate:
assert ts.processing_on
assert not ts.who_has
assert not ts.waiting_on
assert self.tasks[key].state == "processing"
self._remove_from_processing(
ts, send_worker_msg={"op": "release-task", "key": key}
)
ts.state = "released"
recommendations = {}
if ts.has_lost_dependencies:
recommendations[key] = "forgotten"
elif ts.waiters or ts.who_wants:
recommendations[key] = "waiting"
if recommendations.get(key) != "waiting":
for dts in ts.dependencies:
if dts.state != "released":
s = dts.waiters
s.discard(ts)
if not s and not dts.who_wants:
recommendations[dts.key] = "released"
ts.waiters.clear()
if self.validate:
assert not ts.processing_on
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_processing_erred(
self, key, cause=None, exception=None, traceback=None, **kwargs
):
try:
ts = self.tasks[key]
if self.validate:
assert cause or ts.exception_blame
assert ts.processing_on
assert not ts.who_has
assert not ts.waiting_on
if ts.actor:
ws = ts.processing_on
ws.actors.remove(ts)
self._remove_from_processing(ts)
if exception is not None:
ts.exception = exception
if traceback is not None:
ts.traceback = traceback
if cause is not None:
failing_ts = self.tasks[cause]
ts.exception_blame = failing_ts
else:
failing_ts = ts.exception_blame
recommendations = {}
for dts in ts.dependents:
dts.exception_blame = failing_ts
recommendations[dts.key] = "erred"
for dts in ts.dependencies:
s = dts.waiters
s.discard(ts)
if not s and not dts.who_wants:
recommendations[dts.key] = "released"
ts.waiters.clear() # do anything with this?
ts.state = "erred"
self.report(
{
"op": "task-erred",
"key": key,
"exception": failing_ts.exception,
"traceback": failing_ts.traceback,
}
)
cs = self.clients["fire-and-forget"]
if ts in cs.wants_what:
self.client_releases_keys(client="fire-and-forget", keys=[key])
if self.validate:
assert not ts.processing_on
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_no_worker_released(self, key):
try:
ts = self.tasks[key]
if self.validate:
assert self.tasks[key].state == "no-worker"
assert not ts.who_has
assert not ts.waiting_on
self.unrunnable.remove(ts)
ts.state = "released"
for dts in ts.dependencies:
dts.waiters.discard(ts)
ts.waiters.clear()
return {}
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def remove_key(self, key):
ts = self.tasks.pop(key)
assert ts.state == "forgotten"
self.unrunnable.discard(ts)
for cs in ts.who_wants:
cs.wants_what.remove(ts)
ts.who_wants.clear()
ts.processing_on = None
ts.exception_blame = ts.exception = ts.traceback = None
if key in self.task_metadata:
del self.task_metadata[key]
def _propagate_forgotten(self, ts, recommendations):
ts.state = "forgotten"
key = ts.key
for dts in ts.dependents:
dts.has_lost_dependencies = True
dts.dependencies.remove(ts)
dts.waiting_on.discard(ts)
if dts.state not in ("memory", "erred"):
# Cannot compute task anymore
recommendations[dts.key] = "forgotten"
ts.dependents.clear()
ts.waiters.clear()
for dts in ts.dependencies:
dts.dependents.remove(ts)
s = dts.waiters
s.discard(ts)
if not dts.dependents and not dts.who_wants:
# Task not needed anymore
assert dts is not ts
recommendations[dts.key] = "forgotten"
ts.dependencies.clear()
ts.waiting_on.clear()
if ts.who_has:
ts.group.nbytes_in_memory -= ts.get_nbytes()
for ws in ts.who_has:
ws.has_what.remove(ts)
ws.nbytes -= ts.get_nbytes()
w = ws.address
if w in self.workers: # in case worker has died
self.worker_send(
w, {"op": "delete-data", "keys": [key], "report": False}
)
ts.who_has.clear()
def transition_memory_forgotten(self, key):
try:
ts = self.tasks[key]
if self.validate:
assert ts.state == "memory"
assert not ts.processing_on
assert not ts.waiting_on
if not ts.run_spec:
# It's ok to forget a pure data task
pass
elif ts.has_lost_dependencies:
# It's ok to forget a task with forgotten dependencies
pass
elif not ts.who_wants and not ts.waiters and not ts.dependents:
# It's ok to forget a task that nobody needs
pass
else:
assert 0, (ts,)
recommendations = {}
if ts.actor:
for ws in ts.who_has:
ws.actors.discard(ts)
self._propagate_forgotten(ts, recommendations)
self.report_on_key(ts=ts)
self.remove_key(key)
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_released_forgotten(self, key):
try:
ts = self.tasks[key]
if self.validate:
assert ts.state in ("released", "erred")
assert not ts.who_has
assert not ts.processing_on
assert not ts.waiting_on, (ts, ts.waiting_on)
if not ts.run_spec:
# It's ok to forget a pure data task
pass
elif ts.has_lost_dependencies:
# It's ok to forget a task with forgotten dependencies
pass
elif not ts.who_wants and not ts.waiters and not ts.dependents:
# It's ok to forget a task that nobody needs
pass
else:
assert 0, (ts,)
recommendations = {}
self._propagate_forgotten(ts, recommendations)
self.report_on_key(ts=ts)
self.remove_key(key)
return recommendations
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition(self, key, finish, *args, **kwargs):
""" Transition a key from its current state to the finish state
Examples
--------
>>> self.transition('x', 'waiting')
{'x': 'processing'}
Returns
-------
Dictionary of recommendations for future transitions
See Also
--------
Scheduler.transitions: transitive version of this function
"""
try:
try:
ts = self.tasks[key]
except KeyError:
return {}
start = ts.state
if start == finish:
return {}
if self.plugins:
dependents = set(ts.dependents)
dependencies = set(ts.dependencies)
if (start, finish) in self._transitions:
func = self._transitions[start, finish]
recommendations = func(key, *args, **kwargs)
elif "released" not in (start, finish):
func = self._transitions["released", finish]
assert not args and not kwargs
a = self.transition(key, "released")
if key in a:
func = self._transitions["released", a[key]]
b = func(key)
a = a.copy()
a.update(b)
recommendations = a
start = "released"
else:
raise RuntimeError(
"Impossible transition from %r to %r" % (start, finish)
)
finish2 = ts.state
self.transition_log.append((key, start, finish2, recommendations, time()))
if self.validate:
logger.debug(
"Transitioned %r %s->%s (actual: %s). Consequence: %s",
key,
start,
finish2,
ts.state,
dict(recommendations),
)
if self.plugins:
# Temporarily put back forgotten key for plugin to retrieve it
if ts.state == "forgotten":
try:
ts.dependents = dependents
ts.dependencies = dependencies
except KeyError:
pass
self.tasks[ts.key] = ts
for plugin in list(self.plugins):
try:
plugin.transition(key, start, finish2, *args, **kwargs)
except Exception:
logger.info("Plugin failed with exception", exc_info=True)
if ts.state == "forgotten":
del self.tasks[ts.key]
if ts.state == "forgotten" and ts.group.name in self.task_groups:
# Remove TaskGroup if all tasks are in the forgotten state
tg = ts.group
if not any(tg.states.get(s) for s in ALL_TASK_STATES):
ts.prefix.groups.remove(tg)
del self.task_groups[tg.name]
return recommendations
except Exception as e:
logger.exception("Error transitioning %r from %r to %r", key, start, finish)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transitions(self, recommendations):
""" Process transitions until none are left
This includes feedback from previous transitions and continues until we
reach a steady state
"""
keys = set()
recommendations = recommendations.copy()
while recommendations:
key, finish = recommendations.popitem()
keys.add(key)
new = self.transition(key, finish)
recommendations.update(new)
if self.validate:
for key in keys:
self.validate_key(key)
def story(self, *keys):
""" Get all transitions that touch one of the input keys """
keys = set(keys)
return [
t for t in self.transition_log if t[0] in keys or keys.intersection(t[3])
]
transition_story = story
def reschedule(self, key=None, worker=None):
""" Reschedule a task
Things may have shifted and this task may now be better suited to run
elsewhere
"""
try:
ts = self.tasks[key]
except KeyError:
logger.warning(
"Attempting to reschedule task {}, which was not "
"found on the scheduler. Aborting reschedule.".format(key)
)
return
if ts.state != "processing":
return
if worker and ts.processing_on.address != worker:
return
self.transitions({key: "released"})
##############################
# Assigning Tasks to Workers #
##############################
def check_idle_saturated(self, ws, occ=None):
""" Update the status of the idle and saturated state
        The scheduler keeps track of workers that are:
- Saturated: have enough work to stay busy
- Idle: do not have enough work to stay busy
They are considered saturated if they both have enough tasks to occupy
all of their threads, and if the expected runtime of those tasks is
large enough.
This is useful for load balancing and adaptivity.
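        A worked sketch of the saturation test below (plain numbers,
        illustrative only): ``occ`` is occupancy in seconds, ``nc`` the thread
        count, ``p`` the number of processing tasks and ``avg`` the cluster
        average occupancy per thread.

        >>> occ, nc, p, avg = 4.0, 2, 6, 0.1
        >>> pending = occ * (p - nc) / p / nc
        >>> p > nc and pending > 0.4 and pending > 1.9 * avg
        True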
"""
if self.total_nthreads == 0 or ws.status == Status.closed:
return
if occ is None:
occ = ws.occupancy
nc = ws.nthreads
p = len(ws.processing)
avg = self.total_occupancy / self.total_nthreads
if p < nc or occ / nc < avg / 2:
self.idle.add(ws)
self.saturated.discard(ws)
else:
self.idle.discard(ws)
pending = occ * (p - nc) / p / nc
if p > nc and pending > 0.4 and pending > 1.9 * avg:
self.saturated.add(ws)
else:
self.saturated.discard(ws)
def valid_workers(self, ts):
""" Return set of currently valid workers for key
If all workers are valid then this returns ``True``.
        This checks the following state:
* worker_restrictions
* host_restrictions
* resource_restrictions
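        A minimal sketch of how a resource restriction narrows the candidate
        set by intersection (plain sets, hypothetical addresses):

        >>> worker_ok = {"tcp://a:1", "tcp://b:1"}    # from worker_restrictions
        >>> resource_ok = {"tcp://b:1", "tcp://c:1"}  # workers with enough of a resource
        >>> worker_ok & resource_ok
        {'tcp://b:1'}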
"""
s = True
if ts.worker_restrictions:
s = {w for w in ts.worker_restrictions if w in self.workers}
if ts.host_restrictions:
# Resolve the alias here rather than early, for the worker
# may not be connected when host_restrictions is populated
hr = [self.coerce_hostname(h) for h in ts.host_restrictions]
# XXX need HostState?
ss = [self.host_info[h]["addresses"] for h in hr if h in self.host_info]
ss = set.union(*ss) if ss else set()
if s is True:
s = ss
else:
s |= ss
if ts.resource_restrictions:
w = {
resource: {
w
for w, supplied in self.resources[resource].items()
if supplied >= required
}
for resource, required in ts.resource_restrictions.items()
}
ww = set.intersection(*w.values())
if s is True:
s = ww
else:
s &= ww
if s is True:
return s
else:
return {self.workers[w] for w in s}
def consume_resources(self, ts, ws):
if ts.resource_restrictions:
for r, required in ts.resource_restrictions.items():
ws.used_resources[r] += required
def release_resources(self, ts, ws):
if ts.resource_restrictions:
for r, required in ts.resource_restrictions.items():
ws.used_resources[r] -= required
#####################
# Utility functions #
#####################
def add_resources(self, comm=None, worker=None, resources=None):
ws = self.workers[worker]
if resources:
ws.resources.update(resources)
ws.used_resources = {}
for resource, quantity in ws.resources.items():
ws.used_resources[resource] = 0
self.resources[resource][worker] = quantity
return "OK"
def remove_resources(self, worker):
ws = self.workers[worker]
for resource, quantity in ws.resources.items():
del self.resources[resource][worker]
def coerce_address(self, addr, resolve=True):
"""
Coerce possible input addresses to canonical form.
*resolve* can be disabled for testing with fake hostnames.
Handles strings, tuples, or aliases.
"""
# XXX how many address-parsing routines do we have?
if addr in self.aliases:
addr = self.aliases[addr]
if isinstance(addr, tuple):
addr = unparse_host_port(*addr)
if not isinstance(addr, str):
raise TypeError("addresses should be strings or tuples, got %r" % (addr,))
if resolve:
addr = resolve_address(addr)
else:
addr = normalize_address(addr)
return addr
def coerce_hostname(self, host):
"""
Coerce the hostname of a worker.
"""
if host in self.aliases:
return self.workers[self.aliases[host]].host
else:
return host
def workers_list(self, workers):
"""
List of qualifying workers
Takes a list of worker addresses or hostnames.
Returns a list of all worker addresses that match
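        Examples
        --------
        Illustrative only; the addresses and names are hypothetical:

        >>> s.workers_list(["alice", "tcp://127.0.0.1:1234"])  # doctest: +SKIP
        ['tcp://alice:8786', 'tcp://127.0.0.1:1234']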
"""
if workers is None:
return list(self.workers)
out = set()
for w in workers:
if ":" in w:
out.add(w)
else:
out.update({ww for ww in self.workers if w in ww}) # TODO: quadratic
return list(out)
def start_ipython(self, comm=None):
"""Start an IPython kernel
Returns Jupyter connection info dictionary.
"""
from ._ipython_utils import start_ipython
if self._ipython_kernel is None:
self._ipython_kernel = start_ipython(
ip=self.ip, ns={"scheduler": self}, log=logger
)
return self._ipython_kernel.get_connection_info()
def worker_objective(self, ts, ws):
"""
Objective function to determine which worker should get the task
Minimize expected start time. If a tie then break with data storage.
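        A minimal numeric sketch of the start-time estimate (plain numbers,
        illustrative only):

        >>> comm_bytes, bandwidth, occupancy, nthreads = 25e6, 100e6, 1.0, 2
        >>> comm_bytes / bandwidth + occupancy / nthreads  # estimated start time
        0.75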
"""
comm_bytes = sum(
[dts.get_nbytes() for dts in ts.dependencies if ws not in dts.who_has]
)
stack_time = ws.occupancy / ws.nthreads
start_time = comm_bytes / self.bandwidth + stack_time
if ts.actor:
return (len(ws.actors), start_time, ws.nbytes)
else:
return (start_time, ws.nbytes)
async def get_profile(
self,
comm=None,
workers=None,
scheduler=False,
server=False,
merge_workers=True,
start=None,
stop=None,
key=None,
):
if workers is None:
workers = self.workers
else:
workers = set(self.workers) & set(workers)
if scheduler:
return profile.get_profile(self.io_loop.profile, start=start, stop=stop)
results = await asyncio.gather(
*(
self.rpc(w).profile(start=start, stop=stop, key=key, server=server)
for w in workers
),
return_exceptions=True,
)
results = [r for r in results if not isinstance(r, Exception)]
if merge_workers:
response = profile.merge(*results)
else:
response = dict(zip(workers, results))
return response
async def get_profile_metadata(
self,
comm=None,
workers=None,
merge_workers=True,
start=None,
stop=None,
profile_cycle_interval=None,
):
dt = profile_cycle_interval or dask.config.get(
"distributed.worker.profile.cycle"
)
dt = parse_timedelta(dt, default="ms")
if workers is None:
workers = self.workers
else:
workers = set(self.workers) & set(workers)
results = await asyncio.gather(
*(self.rpc(w).profile_metadata(start=start, stop=stop) for w in workers),
return_exceptions=True,
)
results = [r for r in results if not isinstance(r, Exception)]
counts = [v["counts"] for v in results]
counts = itertools.groupby(merge_sorted(*counts), lambda t: t[0] // dt * dt)
counts = [(time, sum(pluck(1, group))) for time, group in counts]
keys = set()
for v in results:
for t, d in v["keys"]:
for k in d:
keys.add(k)
keys = {k: [] for k in keys}
groups1 = [v["keys"] for v in results]
groups2 = list(merge_sorted(*groups1, key=first))
last = 0
for t, d in groups2:
tt = t // dt * dt
if tt > last:
last = tt
for k, v in keys.items():
v.append([tt, 0])
for k, v in d.items():
keys[k][-1][1] += v
return {"counts": counts, "keys": keys}
async def performance_report(self, comm=None, start=None, code=""):
stop = time()
# Profiles
compute, scheduler, workers = await asyncio.gather(
*[
self.get_profile(start=start),
self.get_profile(scheduler=True, start=start),
self.get_profile(server=True, start=start),
]
)
from . import profile
def profile_to_figure(state):
data = profile.plot_data(state)
figure, source = profile.plot_figure(data, sizing_mode="stretch_both")
return figure
compute, scheduler, workers = map(
profile_to_figure, (compute, scheduler, workers)
)
# Task stream
task_stream = self.get_task_stream(start=start)
total_tasks = len(task_stream)
timespent = defaultdict(int)
for d in task_stream:
for x in d.get("startstops", []):
timespent[x["action"]] += x["stop"] - x["start"]
tasks_timings = ""
for k in sorted(timespent.keys()):
tasks_timings += f"\n<li> {k} time: {format_time(timespent[k])} </li>"
from .diagnostics.task_stream import rectangles
from .dashboard.components.scheduler import task_stream_figure
rects = rectangles(task_stream)
source, task_stream = task_stream_figure(sizing_mode="stretch_both")
source.data.update(rects)
from distributed.dashboard.components.scheduler import (
BandwidthWorkers,
BandwidthTypes,
)
bandwidth_workers = BandwidthWorkers(self, sizing_mode="stretch_both")
bandwidth_workers.update()
bandwidth_types = BandwidthTypes(self, sizing_mode="stretch_both")
bandwidth_types.update()
from bokeh.models import Panel, Tabs, Div
# HTML
html = """
<h1> Dask Performance Report </h1>
<i> Select different tabs on the top for additional information </i>
<h2> Duration: {time} </h2>
<h2> Tasks Information </h2>
<ul>
<li> number of tasks: {ntasks} </li>
{tasks_timings}
</ul>
<h2> Scheduler Information </h2>
<ul>
<li> Address: {address} </li>
<li> Workers: {nworkers} </li>
<li> Threads: {threads} </li>
<li> Memory: {memory} </li>
</ul>
<h2> Calling Code </h2>
<pre>
{code}
</pre>
""".format(
time=format_time(stop - start),
ntasks=total_tasks,
tasks_timings=tasks_timings,
address=self.address,
nworkers=len(self.workers),
threads=sum(w.nthreads for w in self.workers.values()),
memory=format_bytes(sum(w.memory_limit for w in self.workers.values())),
code=code,
)
html = Div(text=html)
html = Panel(child=html, title="Summary")
compute = Panel(child=compute, title="Worker Profile (compute)")
workers = Panel(child=workers, title="Worker Profile (administrative)")
scheduler = Panel(child=scheduler, title="Scheduler Profile (administrative)")
task_stream = Panel(child=task_stream, title="Task Stream")
bandwidth_workers = Panel(
child=bandwidth_workers.fig, title="Bandwidth (Workers)"
)
bandwidth_types = Panel(child=bandwidth_types.fig, title="Bandwidth (Types)")
tabs = Tabs(
tabs=[
html,
task_stream,
compute,
workers,
scheduler,
bandwidth_workers,
bandwidth_types,
]
)
from bokeh.plotting import save, output_file
from bokeh.core.templates import get_env
with tmpfile(extension=".html") as fn:
output_file(filename=fn, title="Dask Performance Report")
template_directory = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "dashboard", "templates"
)
template_environment = get_env()
template_environment.loader.searchpath.append(template_directory)
template = template_environment.get_template("performance_report.html")
save(tabs, filename=fn, template=template)
with open(fn) as f:
data = f.read()
return data
async def get_worker_logs(self, comm=None, n=None, workers=None, nanny=False):
results = await self.broadcast(
msg={"op": "get_logs", "n": n}, workers=workers, nanny=nanny
)
return results
###########
# Cleanup #
###########
def reevaluate_occupancy(self, worker_index=0):
""" Periodically reassess task duration time
The expected duration of a task can change over time. Unfortunately we
don't have a good constant-time way to propagate the effects of these
changes out to the summaries that they affect, like the total expected
runtime of each of the workers, or what tasks are stealable.
In this coroutine we walk through all of the workers and re-align their
estimates with the current state of tasks. We do this periodically
rather than at every transition, and we only do it if the scheduler
process isn't under load (using psutil.Process.cpu_percent()). This
lets us avoid this fringe optimization when we have better things to
think about.
"""
DELAY = 0.1
try:
if self.status == Status.closed:
return
last = time()
next_time = timedelta(seconds=DELAY)
if self.proc.cpu_percent() < 50:
workers = list(self.workers.values())
for i in range(len(workers)):
ws = workers[worker_index % len(workers)]
worker_index += 1
try:
if ws is None or not ws.processing:
continue
self._reevaluate_occupancy_worker(ws)
finally:
del ws # lose ref
duration = time() - last
if duration > 0.005: # 5ms since last release
next_time = timedelta(seconds=duration * 5) # 25ms gap
break
self.loop.add_timeout(
next_time, self.reevaluate_occupancy, worker_index=worker_index
)
except Exception:
logger.error("Error in reevaluate occupancy", exc_info=True)
raise
def _reevaluate_occupancy_worker(self, ws):
""" See reevaluate_occupancy """
old = ws.occupancy
new = 0
nbytes = 0
for ts in ws.processing:
duration = self.get_task_duration(ts)
comm = self.get_comm_cost(ts, ws)
ws.processing[ts] = duration + comm
new += duration + comm
ws.occupancy = new
self.total_occupancy += new - old
self.check_idle_saturated(ws)
# significant increase in duration
if (new > old * 1.3) and ("stealing" in self.extensions):
steal = self.extensions["stealing"]
for ts in ws.processing:
steal.remove_key_from_stealable(ts)
steal.put_key_in_stealable(ts)
async def check_worker_ttl(self):
now = time()
for ws in self.workers.values():
if ws.last_seen < now - self.worker_ttl:
logger.warning(
"Worker failed to heartbeat within %s seconds. Closing: %s",
self.worker_ttl,
ws,
)
await self.remove_worker(address=ws.address)
def check_idle(self):
if any(ws.processing for ws in self.workers.values()) or self.unrunnable:
self.idle_since = None
return
elif not self.idle_since:
self.idle_since = time()
if time() > self.idle_since + self.idle_timeout:
logger.info(
"Scheduler closing after being idle for %s",
format_time(self.idle_timeout),
)
self.loop.add_callback(self.close)
def adaptive_target(self, comm=None, target_duration=None):
""" Desired number of workers based on the current workload
This looks at the current running tasks and memory use, and returns a
number of desired workers. This is often used by adaptive scheduling.
Parameters
----------
target_duration: str
A desired duration of time for computations to take. This affects
how rapidly the scheduler will ask to scale.
See Also
--------
distributed.deploy.Adaptive
"""
if target_duration is None:
target_duration = dask.config.get("distributed.adaptive.target-duration")
target_duration = parse_timedelta(target_duration)
# CPU
cpu = math.ceil(
self.total_occupancy / target_duration
) # TODO: threads per worker
        # Prevent a few long tasks from asking for many cores
tasks_processing = 0
for ws in self.workers.values():
tasks_processing += len(ws.processing)
if tasks_processing > cpu:
break
else:
cpu = min(tasks_processing, cpu)
if self.unrunnable and not self.workers:
cpu = max(1, cpu)
# Memory
limit_bytes = {addr: ws.memory_limit for addr, ws in self.workers.items()}
worker_bytes = [ws.nbytes for ws in self.workers.values()]
limit = sum(limit_bytes.values())
total = sum(worker_bytes)
if total > 0.6 * limit:
memory = 2 * len(self.workers)
else:
memory = 0
target = max(memory, cpu)
if target >= len(self.workers):
return target
else: # Scale down?
to_close = self.workers_to_close()
return len(self.workers) - len(to_close)
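# Hedged illustration of the arithmetic above with plain numbers (all values below
# are hypothetical): the CPU target divides total occupancy by the desired duration,
# the memory target kicks in once stored bytes pass 60% of the limit, and the larger
# of the two wins.
def _example_adaptive_target_math(total_occupancy=120.0, target_duration=5.0,
                                  nworkers=4, total_bytes=70, limit_bytes=100):
    import math
    cpu = math.ceil(total_occupancy / target_duration)  # 24 "cores" desired
    memory = 2 * nworkers if total_bytes > 0.6 * limit_bytes else 0  # 8 here
    return max(memory, cpu)  # -> 24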
def decide_worker(ts, all_workers, valid_workers, objective):
"""
Decide which worker should take task *ts*.
We choose the worker that has the data on which *ts* depends.
If several workers have dependencies then we choose the less-busy worker.
    Optionally provide *valid_workers*, the set of workers where jobs are
    allowed to occur (if all workers are allowed to take the task, pass True instead).
If the task requires data communication because no eligible worker has
all the dependencies already, then we choose to minimize the number
of bytes sent between workers. This is determined by calling the
*objective* function.
"""
deps = ts.dependencies
assert all(dts.who_has for dts in deps)
if ts.actor:
candidates = all_workers
else:
candidates = frequencies([ws for dts in deps for ws in dts.who_has])
if valid_workers is True:
if not candidates:
candidates = all_workers
else:
candidates = valid_workers & set(candidates)
if not candidates:
candidates = valid_workers
if not candidates:
if ts.loose_restrictions:
return decide_worker(ts, all_workers, True, objective)
else:
return None
if not candidates:
return None
if len(candidates) == 1:
return first(candidates)
return min(candidates, key=objective)
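# Hedged sketch of the final tie-break only (hypothetical addresses and occupancies,
# not real WorkerState objects): whichever candidate minimizes the caller-supplied
# objective is chosen.
def _example_objective_tiebreak():
    occupancy = {"tcp://a:1234": 3.0, "tcp://b:1234": 1.5, "tcp://c:1234": 2.0}
    objective = occupancy.get  # stand-in objective: current occupancy of each worker
    return min(occupancy, key=objective)  # -> "tcp://b:1234"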
def validate_task_state(ts):
"""
Validate the given TaskState.
"""
assert ts.state in ALL_TASK_STATES or ts.state == "forgotten", ts
if ts.waiting_on:
assert ts.waiting_on.issubset(ts.dependencies), (
"waiting not subset of dependencies",
str(ts.waiting_on),
str(ts.dependencies),
)
if ts.waiters:
assert ts.waiters.issubset(ts.dependents), (
"waiters not subset of dependents",
str(ts.waiters),
str(ts.dependents),
)
for dts in ts.waiting_on:
assert not dts.who_has, ("waiting on in-memory dep", str(ts), str(dts))
assert dts.state != "released", ("waiting on released dep", str(ts), str(dts))
for dts in ts.dependencies:
assert ts in dts.dependents, (
"not in dependency's dependents",
str(ts),
str(dts),
str(dts.dependents),
)
if ts.state in ("waiting", "processing"):
assert dts in ts.waiting_on or dts.who_has, (
"dep missing",
str(ts),
str(dts),
)
assert dts.state != "forgotten"
for dts in ts.waiters:
assert dts.state in ("waiting", "processing"), (
"waiter not in play",
str(ts),
str(dts),
)
for dts in ts.dependents:
assert ts in dts.dependencies, (
"not in dependent's dependencies",
str(ts),
str(dts),
str(dts.dependencies),
)
assert dts.state != "forgotten"
assert (ts.processing_on is not None) == (ts.state == "processing")
assert bool(ts.who_has) == (ts.state == "memory"), (ts, ts.who_has)
if ts.state == "processing":
assert all(dts.who_has for dts in ts.dependencies), (
"task processing without all deps",
str(ts),
str(ts.dependencies),
)
assert not ts.waiting_on
if ts.who_has:
assert ts.waiters or ts.who_wants, (
"unneeded task in memory",
str(ts),
str(ts.who_has),
)
if ts.run_spec: # was computed
assert ts.type
assert isinstance(ts.type, str)
assert not any(ts in dts.waiting_on for dts in ts.dependents)
for ws in ts.who_has:
assert ts in ws.has_what, (
"not in who_has' has_what",
str(ts),
str(ws),
str(ws.has_what),
)
if ts.who_wants:
for cs in ts.who_wants:
assert ts in cs.wants_what, (
"not in who_wants' wants_what",
str(ts),
str(cs),
str(cs.wants_what),
)
if ts.actor:
if ts.state == "memory":
assert sum([ts in ws.actors for ws in ts.who_has]) == 1
if ts.state == "processing":
assert ts in ts.processing_on.actors
def validate_worker_state(ws):
for ts in ws.has_what:
assert ws in ts.who_has, (
"not in has_what' who_has",
str(ws),
str(ts),
str(ts.who_has),
)
for ts in ws.actors:
assert ts.state in ("memory", "processing")
def validate_state(tasks, workers, clients):
"""
Validate a current runtime state
This performs a sequence of checks on the entire graph, running in about
linear time. This raises assert errors if anything doesn't check out.
"""
for ts in tasks.values():
validate_task_state(ts)
for ws in workers.values():
validate_worker_state(ws)
for cs in clients.values():
for ts in cs.wants_what:
assert cs in ts.who_wants, (
"not in wants_what' who_wants",
str(cs),
str(ts),
str(ts.who_wants),
)
_round_robin = [0]
def heartbeat_interval(n):
"""
Interval in seconds that we desire heartbeats based on number of workers
"""
if n <= 10:
return 0.5
elif n < 50:
return 1
elif n < 200:
return 2
else:
return 5
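# A few spot-checks of the schedule above, assuming the thresholds stay as written.
def _example_heartbeat_intervals():
    assert heartbeat_interval(5) == 0.5
    assert heartbeat_interval(20) == 1
    assert heartbeat_interval(100) == 2
    assert heartbeat_interval(1000) == 5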
class KilledWorker(Exception):
def __init__(self, task, last_worker):
super(KilledWorker, self).__init__(task, last_worker)
self.task = task
self.last_worker = last_worker
class WorkerStatusPlugin(SchedulerPlugin):
"""
    A plugin to share worker status with a remote observer
This is used in cluster managers to keep updated about the status of the
scheduler.
"""
def __init__(self, scheduler, comm):
self.bcomm = BatchedSend(interval="5ms")
self.bcomm.start(comm)
self.scheduler = scheduler
self.scheduler.add_plugin(self)
def add_worker(self, worker=None, **kwargs):
ident = self.scheduler.workers[worker].identity()
del ident["metrics"]
del ident["last_seen"]
try:
self.bcomm.send(["add", {"workers": {worker: ident}}])
except CommClosedError:
self.scheduler.remove_plugin(self)
def remove_worker(self, worker=None, **kwargs):
try:
self.bcomm.send(["remove", worker])
except CommClosedError:
self.scheduler.remove_plugin(self)
def teardown(self):
self.bcomm.close()
|
the-stack_0_13260 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Margin based AL method.
Samples in batches based on margin scores.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from .sampling_def import SamplingMethod
class MarginAL(SamplingMethod):
def __init__(self, X, y, seed):
self.X = X
self.y = y
self.name = 'margin'
def select_batch_(self, model, already_selected, N, **kwargs):
"""Returns batch of datapoints with smallest margin/highest uncertainty.
For binary classification, can just take the absolute distance to decision
boundary for each point.
    For multiclass classification, we must consider the margin between the
    distances for the top two most likely classes.
Args:
model: scikit learn model with decision_function implemented
already_selected: index of datapoints already selected
N: batch size
Returns:
indices of points selected to add using margin active learner
"""
try:
distances = model.decision_function(self.X)
except:
distances = model.predict_proba(self.X)
if len(distances.shape) < 2:
min_margin = abs(distances)
else:
sort_distances = np.sort(distances, 1)[:, -2:]
min_margin = sort_distances[:, 1] - sort_distances[:, 0]
rank_ind = np.argsort(min_margin)
rank_ind = [i for i in rank_ind if i not in already_selected]
active_samples = rank_ind[0:N]
return active_samples
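# Hedged usage sketch: any fitted model exposing decision_function or predict_proba
# works; scikit-learn's LogisticRegression is assumed to be available and the data
# below is synthetic.
def _example_margin_selection():
  import numpy as np
  from sklearn.linear_model import LogisticRegression
  rng = np.random.RandomState(0)
  X = rng.randn(100, 5)
  y = (X[:, 0] > 0).astype(int)
  sampler = MarginAL(X, y, seed=0)
  model = LogisticRegression().fit(X, y)
  # Ask for the 5 points closest to the decision boundary, excluding nothing.
  return sampler.select_batch_(model, already_selected=[], N=5)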
|
the-stack_0_13262 | import asyncio
import logging
import os
import sys
from logging.handlers import RotatingFileHandler
import aioredis
from cacheout import Cache
from houdini import PenguinStringCompiler
from houdini.data import db
from houdini.data.permission import PermissionCollection
from houdini.penguin import Penguin
from houdini.spheniscidae import Spheniscidae
try:
import uvloop
uvloop.install()
except ImportError:
uvloop = None
import houdini.handlers
import houdini.plugins
from houdini.handlers import XTListenerManager, XMLListenerManager, DummyEventListenerManager
from houdini.plugins import PluginManager
from houdini.commands import CommandManager
class Houdini:
def __init__(self, config):
self.server = None
self.redis = None
self.cache = None
self.config = config
self.db = db
self.peers_by_ip = {}
self.logger = None
self.client_class = Spheniscidae
self.penguin_string_compiler = None
self.anonymous_penguin_string_compiler = None
self.penguins_by_id = {}
self.penguins_by_username = {}
self.penguins_by_character_id = {}
self.igloos_by_penguin_id = {}
self.open_igloos_by_penguin_id = {}
self.xt_listeners = XTListenerManager(self)
self.xml_listeners = XMLListenerManager(self)
self.dummy_event_listeners = DummyEventListenerManager(self)
self.commands = CommandManager(self)
self.plugins = PluginManager(self)
self.permissions = None
self.chat_filter_words = None
self.items = None
self.igloos = None
self.furniture = None
self.locations = None
self.flooring = None
self.rooms = None
self.stamps = None
self.cards = None
self.postcards = None
self.puffles = None
self.puffle_items = None
self.puffle_food_treasure = None
self.puffle_furniture_treasure = None
self.puffle_clothing_treasure = None
self.characters = None
self.dance_songs = None
self.heartbeat = None
self.egg_timer = None
self.puffle_killer = None
self.music = None
self.dance_floor = None
self.match_making = None
self.water_match_making = None
self.fire_match_making = None
self.puck = (0, 0)
async def start(self):
general_log_file = self.config.logging_general_path if self.config.logging_general_path \
else f'logs/{self.config.name.lower()}.log'
errors_log_file = self.config.logging_error_path if self.config.logging_error_path \
else f'logs/{self.config.name.lower()}-errors.log'
general_log_directory = os.path.dirname(general_log_file)
errors_log_directory = os.path.dirname(errors_log_file)
if not os.path.exists(general_log_directory):
os.mkdir(general_log_directory)
if not os.path.exists(errors_log_directory):
os.mkdir(errors_log_directory)
self.logger = logging.getLogger('houdini')
universal_handler = RotatingFileHandler(general_log_file,
maxBytes=2097152, backupCount=3, encoding='utf-8')
error_handler = logging.FileHandler(errors_log_file)
console_handler = logging.StreamHandler(stream=sys.stdout)
log_formatter = logging.Formatter('%(asctime)s [%(levelname)-5.5s] %(message)s')
error_handler.setLevel(logging.ERROR)
universal_handler.setFormatter(log_formatter)
console_handler.setFormatter(log_formatter)
self.logger.addHandler(universal_handler)
self.logger.addHandler(console_handler)
self.logger.addHandler(error_handler)
level = logging.getLevelName(self.config.logging_level)
self.logger.setLevel(level)
self.server = await asyncio.start_server(
self.client_connected, self.config.address,
self.config.port
)
await self.db.set_bind('postgresql://{}:{}@{}/{}'.format(
self.config.database_username, self.config.database_password,
self.config.database_address,
self.config.database_name))
self.logger.info('Booting Houdini')
self.redis = await aioredis.create_redis_pool('redis://{}:{}'.format(
self.config.redis_address, self.config.redis_port),
minsize=5, maxsize=10)
if self.config.type == 'world':
await self.redis.delete(f'houdini.players.{self.config.id}')
await self.redis.hset(f'houdini.population', self.config.id, 0)
self.cache = Cache(maxsize=None, ttl=self.config.cache_expiry)
self.client_class = Penguin
self.penguin_string_compiler = PenguinStringCompiler()
self.anonymous_penguin_string_compiler = PenguinStringCompiler()
PenguinStringCompiler.setup_default_builder(self.penguin_string_compiler)
PenguinStringCompiler.setup_anonymous_default_builder(self.anonymous_penguin_string_compiler)
await self.xml_listeners.setup(houdini.handlers, exclude_load='houdini.handlers.login.login')
await self.xt_listeners.setup(houdini.handlers)
self.logger.info('World server started')
else:
await self.xml_listeners.setup(houdini.handlers, 'houdini.handlers.login.login')
self.logger.info('Login server started')
await self.dummy_event_listeners.setup(houdini.handlers)
await self.dummy_event_listeners.fire('boot', self)
self.permissions = await PermissionCollection.get_collection()
self.logger.info(f'Multi-client support is '
f'{"enabled" if not self.config.single_client_mode else "disabled"}')
self.logger.info(f'Listening on {self.config.address}:{self.config.port}')
if self.config.auth_key != 'houdini':
self.logger.warning('The static key has been changed from the default, '
'this may cause authentication issues!')
await self.plugins.setup(houdini.plugins)
async with self.server:
await self.server.serve_forever()
async def client_connected(self, reader, writer):
client_object = self.client_class(self, reader, writer)
await client_object.run()
|
the-stack_0_13264 | #!/bin/env python
#
# utils.py: utility functions for RnaChipIntegrator
# Copyright (C) University of Manchester 2011-15 Peter Briggs, Leo Zeef
# & Ian Donaldson
#
"""
utils.py
Utility functions for RnaChipIntegrator:
- make_errline: highlight problem fields in a string
- truncate_text: truncate a text string to a specified length
"""
def make_errline(line,bad_fields=[]):
"""Return an 'error line' indicating problem fields in a string
Given a tab-delimited line and a list of integer indices
indicating which fields in the line have problems, this function
returns a tab-delimited string where the original fields are
replaced by either spaces or '^' characters.
When printed beneath the original line, the '^'s indicate which
fields are 'bad' according to the supplied indices, e.g.
    Input line: 'good  good  bad  bad  good'
    Error line: '            ^^^  ^^^      '
Arguments:
line: string where tabs delimit fields
bad_fields: list of integer indices corresponding to 'bad'
values in 'line'
Returns:
Tab-delimited 'error line' to be printed beneath the original
line, to indicate which fields are 'bad'.
"""
# Indicate problem field(s)
errline = []
items = line.rstrip().split('\t')
for i in range(len(items)):
if i in bad_fields:
errline.append("^"*len(items[i]))
else:
errline.append(" "*len(items[i]))
return '\t'.join(errline)
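# Self-contained check mirroring the docstring example with real tab characters
# (values are illustrative): fields 1 and 2 are flagged, so the carets sit under
# the second and third fields.
def _example_make_errline():
    line = 'good\tbad\tbad\tgood'
    assert make_errline(line, bad_fields=[1, 2]) == '    \t^^^\t^^^\t    '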
def truncate_text(text,max_len):
"""Truncate a text string
    Given a text string, remove leading characters and replace them with
    an ellipsis (i.e. ...) so that the result fits into the maximum
    number of characters (max_len).
"""
len_text = len(text)
if len_text <= max_len:
return text
text = text[len_text-max_len:]
return '...' + text[3:]
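# Quick illustration (values arbitrary): short strings pass through untouched, long
# ones keep their tail and gain a '...' prefix, so the result is exactly max_len long.
def _example_truncate_text():
    assert truncate_text('short.txt', 20) == 'short.txt'
    assert truncate_text('abcdefghij', 6) == '...hij'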
|
the-stack_0_13268 | import pandas as pd
chrom_sizes = pd.Series(
{1: 249250621,
10: 135534747,
11: 135006516,
12: 133851895,
13: 115169878,
14: 107349540,
15: 102531392,
16: 90354753,
17: 81195210,
18: 78077248,
19: 59128983,
2: 243199373,
20: 63025520,
21: 48129895,
22: 51304566,
3: 198022430,
4: 191154276,
5: 180915260,
6: 171115067,
7: 159138663,
8: 146364022,
9: 141213431,
}
)
chrom_sizes_norm = chrom_sizes / chrom_sizes.max()
def _make_tableau20():
# tableau20 from # http://www.randalolson.com/2014/06/28/how-to-make-beautiful-data-visualizations-in-python-with-matplotlib/
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib
# accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
return tableau20
tableau20 = _make_tableau20()
def generate_null_snvs(df, snvs, num_null_sets=5):
"""
Generate a set of null SNVs based on an input list of SNVs and categorical
annotations.
Parameters
----------
df : pandas.DataFrame
Pandas dataframe where each column is a categorization of SNPs.
The index should be SNPs of the form chrom:pos.
snvs : list
List of input SNVs in the format chrom:pos. Entries that aren't in
the index of df will be dropped.
num_null_sets : int
Number of sets of null SNVs to generate.
Returns
-------
null_sets : pandas.Dataframe
Pandas dataframe with input SNVs as first column and null SNVs as
following columns.
"""
import numpy as np
import random
random.seed(20151007)
input_snvs = list(set(df.index) & set(snvs))
sig = df.ix[input_snvs]
not_sig = df.ix[set(df.index) - set(snvs)]
sig['group'] = sig.apply(lambda x: '::'.join(x), axis=1)
not_sig['group'] = not_sig.apply(lambda x: '::'.join(x), axis=1)
null_sets = []
vc = sig.group.value_counts()
bins = {c:sorted(list(df[c].value_counts().index)) for c in df.columns}
ordered_inputs = []
for i in vc.index:
ordered_inputs += list(sig[sig.group == i].index)
tdf = not_sig[not_sig.group == i]
count = vc[i]
for n in range(num_null_sets):
if tdf.shape[0] == 0:
groups = [i]
while tdf.shape[0] == 0:
# If there are no potential null SNVs in this group, we'll
# expand the group randomly.
g = groups[-1]
# Choose random bin.
cols = list(not_sig.columns)
cols.remove('group')
b = random.choice(cols)
# Get possibilities for that bin.
t = bins[b]
# Get last set of bin values and the value for the bin we
# want to change.
d = dict(list(zip(not_sig.columns, g.split('::'))))
cat = d[b]
# Randomly walk away from bin value.
ind = t.index(cat)
if ind == 0:
ind += 1
elif ind == len(t) - 1:
ind -= 1
else:
ind += random.choice([-1, 1])
d[b] = t[ind]
groups.append('::'.join(pd.Series(d)[not_sig.columns].astype(str)))
tdf = not_sig[not_sig.group.apply(lambda x: x in groups)]
if count <= tdf.shape[0]:
ind = random.sample(tdf.index, count)
else:
ind = list(np.random.choice(tdf.index, size=count, replace=True))
if i == vc.index[0]:
null_sets.append(ind)
else:
null_sets[n] += ind
null_sets = pd.DataFrame(null_sets).T
null_sets.columns = ['null_{}'.format(x) for x in null_sets.columns]
cs = list(null_sets.columns)
null_sets['input'] = ordered_inputs
null_sets = null_sets[['input'] + cs]
return null_sets
def make_grasp_phenotype_file(fn, pheno, out):
"""
Subset the GRASP database on a specific phenotype.
Parameters
----------
fn : str
Path to GRASP database file.
pheno : str
Phenotype to extract from database.
out : sttr
Path to output file for subset of GRASP database.
"""
import subprocess
c = 'awk -F "\\t" \'NR == 1 || $12 == "{}" \' {} > {}'.format(
pheno.replace("'", '\\x27'), fn, out)
subprocess.check_call(c, shell=True)
def parse_grasp_gwas(fn):
"""
Read GRASP database and filter for unique hits.
Parameters
----------
fn : str
Path to (subset of) GRASP database.
Returns
-------
df : pandas.DataFrame
Pandas dataframe with de-duplicated, significant SNPs. The index is of
the form chrom:pos where pos is the one-based position of the SNP. The
columns are chrom, start, end, rsid, and pvalue. rsid may be empty or
not actually an RSID. chrom, start, end make a zero-based bed file with
the SNP coordinates.
"""
df = pd.read_table(fn, low_memory=False)
df = df[df.Pvalue < 1e-5]
df = df.sort(columns=['chr(hg19)', 'pos(hg19)', 'Pvalue'])
df = df.drop_duplicates(subset=['chr(hg19)', 'pos(hg19)'])
df = df[df.Pvalue < 1e-5]
df['chrom'] = 'chr' + df['chr(hg19)'].astype(str)
df['end'] = df['pos(hg19)']
df['start'] = df.end - 1
df['rsid'] = df['SNPid(in paper)']
df['pvalue'] = df['Pvalue']
df = df[['chrom', 'start', 'end', 'rsid', 'pvalue']]
df.index = df['chrom'].astype(str) + ':' + df['end'].astype(str)
return df
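# Hedged sketch: the database path and phenotype string below are hypothetical; the
# two helpers are simply chained so the awk subset feeds the parser.
def _example_grasp_workflow():
    make_grasp_phenotype_file('/path/to/GRASP2fullDataset.txt',
                              'Type 2 diabetes',
                              '/tmp/grasp_subset.tsv')
    return parse_grasp_gwas('/tmp/grasp_subset.tsv')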
def parse_roadmap_gwas(fn):
"""
Read Roadmap GWAS file and filter for unique, significant (p < 1e-5)
SNPs.
Parameters
----------
fn : str
Path to (subset of) GRASP database.
Returns
-------
df : pandas.DataFrame
Pandas dataframe with de-duplicated, significant SNPs. The index is of
the form chrom:pos where pos is the one-based position of the SNP. The
columns are chrom, start, end, rsid, and pvalue. rsid may be empty or
not actually an RSID. chrom, start, end make a zero-based bed file with
the SNP coordinates.
"""
df = pd.read_table(fn, low_memory=False,
names=['chrom', 'start', 'end', 'rsid', 'pvalue'])
df = df[df.pvalue < 1e-5]
df = df.sort(columns=['chrom', 'start', 'pvalue'])
df = df.drop_duplicates(subset=['chrom', 'start'])
df = df[df['chrom'] != 'chrY']
df.index = df['chrom'].astype(str) + ':' + df['end'].astype(str)
return df
def ld_prune(df, ld_beds, snvs=None):
"""
Prune set of GWAS based on LD and significance. A graph of all SNVs is
constructed with edges for LD >= 0.8 and the most significant SNV per
connected component is kept.
Parameters
----------
df : pandas.DataFrame
Pandas dataframe with unique SNVs. The index is of the form chrom:pos
where pos is the one-based position of the SNV. The columns must include
chrom, start, end, and pvalue. chrom, start, end make a zero-based bed
file with the SNV coordinates.
ld_beds : dict
Dict whose keys are chromosomes and whose values are filenames of
tabixed LD bed files. An LD bed file looks like "chr1 11007 11008
11008:11012:1" where the first three columns are the zero-based
half-open coordinate of the SNV and the fourth column has the one-based
coordinate followed of the SNV followed by the one-based coordinate of a
different SNV and the LD between them. In this example, the variants are
in perfect LD. The bed file should also contain the reciprocal line for
this LD relationship: "chr1 11011 11012 11012:11008:1".
snvs : list
List of SNVs to filter against. If a SNV is not in this list, it will
not be included. If you are working with GWAS SNPs, this is useful for
filtering out SNVs that aren't in the SNPsnap database for instance.
Returns
-------
out : pandas.DataFrame
Pandas dataframe in the same format as the input dataframe but with only
independent SNVs.
"""
import networkx as nx
import tabix
if snvs:
df = df.ix[set(df.index) & set(snvs)]
keep = set()
for chrom in list(ld_beds.keys()):
tdf = df[df['chrom'].astype(str) == chrom]
if tdf.shape[0] > 0:
f = tabix.open(ld_beds[chrom])
# Make a dict where each key is a SNP and the values are all of the
# other SNPs in LD with the key.
ld_d = {}
for j in tdf.index:
p = tdf.ix[j, 'end']
ld_d[p] = []
try:
r = f.query(chrom, p - 1, p)
while True:
try:
n = next(r)
p1, p2, r2 = n[-1].split(':')
if float(r2) >= 0.8:
ld_d[p].append(int(p2))
except StopIteration:
break
                except tabix.TabixError:
continue
# Make adjacency matrix for LD.
cols = sorted(list(set(
[item for sublist in list(ld_d.values()) for item in sublist])))
t = pd.DataFrame(0, index=list(ld_d.keys()), columns=cols)
for k in list(ld_d.keys()):
t.ix[k, ld_d[k]] = 1
t.index = ['{}:{}'.format(chrom, x) for x in t.index]
t.columns = ['{}:{}'.format(chrom, x) for x in t.columns]
# Keep all SNPs not in LD with any others. These will be in the index
# but not in the columns.
keep |= set(t.index) - set(t.columns)
# Filter so we only have SNPs that are in LD with at least one other
# SNP.
ind = list(set(t.columns) & set(t.index))
# Keep one most sig. SNP per connected subgraph.
t = t.ix[ind, ind]
g = nx.Graph(t.values)
c = nx.connected_components(g)
while True:
try:
sg = next(c)
s = tdf.ix[t.index[list(sg)]]
keep.add(s[s.pvalue == s.pvalue.min()].index[0])
except StopIteration:
break
out = df.ix[keep]
return out
def ld_expand(df, ld_beds):
"""
Expand a set of SNVs into all SNVs with LD >= 0.8 and return a BedTool of
the expanded SNPs.
Parameters
----------
df : pandas.DataFrame
Pandas dataframe with SNVs. The index is of the form chrom:pos where pos
is the one-based position of the SNV. The columns are chrom, start, end.
chrom, start, end make a zero-based bed file with the SNV coordinates.
ld_beds : dict
Dict whose keys are chromosomes and whose values are filenames of
tabixed LD bed files. The LD bed files should be formatted like this:
chr1 14463 14464 14464:51479:0.254183
where the the first three columns indicate the zero-based coordinates of
a SNV and the the fourth column has the one-based coordinate of that
SNV, the one-based coordinate of another SNV on the same chromosome, and
the LD between these SNVs (all separated by colons).
Returns
-------
bt : pybedtools.BedTool
        BedTool with input SNVs and SNVs they are in LD with.
"""
import pybedtools as pbt
import tabix
out_snps = []
for chrom in list(ld_beds.keys()):
t = tabix.open(ld_beds[chrom])
tdf = df[df['chrom'].astype(str) == chrom]
for ind in tdf.index:
p = tdf.ix[ind, 'end']
out_snps.append('{}\t{}\t{}\t{}\n'.format(chrom, p - 1, p, ind))
try:
r = t.query('{}'.format(chrom), p - 1, p)
while True:
try:
n = next(r)
p1, p2, r2 = n[-1].split(':')
if float(r2) >= 0.8:
out_snps.append('{}\t{}\t{}\t{}\n'.format(
n[0], int(p2) - 1, int(p2), ind))
except StopIteration:
break
except tabix.TabixError:
continue
bt = pbt.BedTool(''.join(out_snps), from_string=True)
bt = bt.sort()
return bt
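# Hedged sketch of chaining the two LD helpers: ld_beds maps chromosome names to
# tabix-indexed LD bed files, and the paths below are hypothetical placeholders.
def _example_ld_workflow(gwas_df):
    ld_beds = {'chr1': '/path/to/chr1_ld.bed.gz',
               'chr2': '/path/to/chr2_ld.bed.gz'}
    independent = ld_prune(gwas_df, ld_beds)  # keep one most significant SNV per LD clump
    expanded = ld_expand(independent, ld_beds)  # BedTool of SNVs plus r2 >= 0.8 partners
    return independent, expanded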
def liftover_bed(
bed,
chain,
mapped=None,
unmapped=None,
liftOver_path='liftOver',
):
"""
Lift over a bed file using a given chain file.
Parameters
----------
bed : str or pybedtools.BedTool
Coordinates to lift over.
chain : str
Path to chain file to use for lift over.
mapped : str
Path for bed file with coordinates that are lifted over correctly.
unmapped : str
Path for text file to store coordinates that did not lift over
correctly. If this is not provided, these are discarded.
liftOver_path : str
Path to liftOver executable if not in path.
Returns
-------
new_coords : pandas.DataFrame
Pandas data frame with lift over results. Index is old coordinates in
the form chrom:start-end and columns are chrom, start, end and loc
(chrom:start-end) in new coordinate system.
"""
import subprocess
import pybedtools as pbt
    if mapped is None:
import tempfile
mapped = tempfile.NamedTemporaryFile()
mname = mapped.name
else:
mname = mapped
    if unmapped is None:
import tempfile
unmapped = tempfile.NamedTemporaryFile()
uname = unmapped.name
else:
uname = unmapped
if type(bed) == str:
bt = pbt.BedTool(bed)
elif type(bed) == pbt.bedtool.BedTool:
bt = bed
else:
        import sys
        sys.stderr.write('bed must be a path or a pybedtools BedTool\n')
        sys.exit(1)
bt = bt.sort()
c = '{} {} {} {} {}'.format(liftOver_path, bt.fn, chain, mname, uname)
subprocess.check_call(c, shell=True)
with open(uname) as f:
missing = pbt.BedTool(''.join([x for x in f.readlines()[1::2]]),
from_string=True)
bt = bt.subtract(missing)
bt_mapped = pbt.BedTool(mname)
old_loc = []
for r in bt:
old_loc.append('{}:{}-{}'.format(r.chrom, r.start, r.end))
new_loc = []
new_chrom = []
new_start = []
new_end = []
for r in bt_mapped:
new_loc.append('{}:{}-{}'.format(r.chrom, r.start, r.end))
new_chrom.append(r.chrom)
new_start.append(r.start)
new_end.append(r.end)
new_coords = pd.DataFrame({'loc':new_loc, 'chrom': new_chrom,
'start': new_start, 'end': new_end},
index=old_loc)
for f in [mapped, unmapped]:
try:
f.close()
except AttributeError:
continue
return new_coords
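# Hedged sketch: the chain file path is hypothetical and the liftOver binary is
# assumed to be on PATH (or supplied via liftOver_path).
def _example_liftover(bed_fn):
    new_coords = liftover_bed(bed_fn, '/path/to/hg19ToHg38.over.chain.gz')
    return new_coords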
def deseq2_size_factors(counts, meta, design):
"""
Get size factors for counts using DESeq2.
Parameters
----------
counts : pandas.DataFrame
Counts to pass to DESeq2.
meta : pandas.DataFrame
Pandas dataframe whose index matches the columns of counts. This is
passed to DESeq2's colData.
design : str
Design like ~subject_id that will be passed to DESeq2. The design
variables should match columns in meta.
Returns
-------
sf : pandas.Series
Series whose index matches the columns of counts and whose values are
the size factors from DESeq2. Divide each column by its size factor to
obtain normalized counts.
"""
import rpy2.robjects as r
from rpy2.robjects import pandas2ri
pandas2ri.activate()
r.r('suppressMessages(library(DESeq2))')
r.globalenv['counts'] = counts
r.globalenv['meta'] = meta
r.r('dds = DESeqDataSetFromMatrix(countData=counts, colData=meta, '
'design={})'.format(design))
r.r('dds = estimateSizeFactors(dds)')
r.r('sf = sizeFactors(dds)')
sf = r.globalenv['sf']
return pd.Series(sf, index=counts.columns)
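# Hedged sketch: rpy2 and DESeq2 are assumed to be installed; `counts` is a genes x
# samples frame and `meta` a sample sheet indexed like counts.columns, as documented
# above.  The design string is just the example given in the docstring.
def _example_deseq2_normalization(counts, meta):
    sf = deseq2_size_factors(counts, meta, design='~subject_id')
    return counts / sf  # divide each column by its size factor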
def goseq_gene_enrichment(genes, sig, plot_fn=None, length_correct=True):
"""
Perform goseq enrichment for an Ensembl gene set.
Parameters
----------
genes : list
List of all genes as Ensembl IDs.
sig : list
List of boolean values indicating whether each gene is significant or
not.
plot_fn : str
Path to save length bias plot to. If not provided, the plot is deleted.
length_correct : bool
Correct for length bias.
Returns
-------
go_results : pandas.DataFrame
        Dataframe with goseq results as well as Benjamini-Hochberg corrected
p-values.
"""
import os
import readline
import statsmodels.stats.multitest as smm
import rpy2.robjects as r
genes = list(genes)
sig = [bool(x) for x in sig]
r.r('suppressMessages(library(goseq))')
r.globalenv['genes'] = list(genes)
r.globalenv['group'] = list(sig)
r.r('group = as.logical(group)')
r.r('names(group) = genes')
r.r('pwf = nullp(group, "hg19", "ensGene")')
if length_correct:
r.r('wall = goseq(pwf, "hg19", "ensGene")')
else:
r.r('wall = goseq(pwf, "hg19", "ensGene", method="Hypergeometric")')
r.r('t = as.data.frame(wall)')
t = r.globalenv['t']
go_results = pd.DataFrame(columns=list(t.colnames))
for i, c in enumerate(go_results.columns):
go_results[c] = list(t[i])
r, c, ask, abf = smm.multipletests(
go_results.over_represented_pvalue, alpha=0.05, method='fdr_i')
go_results['over_represented_pvalue_bh'] = c
r, c, ask, abf = smm.multipletests(
go_results.under_represented_pvalue, alpha=0.05, method='fdr_i')
go_results['under_represented_pvalue_bh'] = c
go_results.index = go_results.category
go_results = go_results.drop('category', axis=1)
if plot_fn and os.path.exists('Rplots.pdf'):
from os import rename
rename('Rplots.pdf', plot_fn)
elif os.path.exists('Rplots.pdf'):
from os import remove
remove('Rplots.pdf')
return go_results
def categories_to_colors(cats, colormap=None):
"""
Map categorical data to colors.
Parameters
----------
cats : pandas.Series or list
Categorical data as a list or in a Series.
colormap : list
List of RGB triples. If not provided, the tableau20 colormap defined in
this module will be used.
Returns
-------
legend : pd.Series
Series whose values are colors and whose index are the original
categories that correspond to those colors.
"""
if colormap is None:
colormap = tableau20
if type(cats) != pd.Series:
cats = pd.Series(cats)
legend = pd.Series(dict(list(zip(set(cats), colormap))))
# colors = pd.Series([legend[x] for x in cats.values], index=cats.index)
# I've removed this output:
# colors : pd.Series
# Series whose values are the colors for each category. If cats was a
# Series, then out will have the same index as cats.
return(legend)
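# Hedged sketch: a few arbitrary group labels are mapped to colors and the mapping is
# then drawn with plot_color_legend (defined just below); matplotlib is assumed.
def _example_category_colors():
    legend = categories_to_colors(['case', 'control', 'case', 'unknown'])
    ax = plot_color_legend(legend)
    return legend, ax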
def plot_color_legend(legend, horizontal=False, ax=None):
"""
Plot a pandas Series with labels and colors.
Parameters
----------
legend : pandas.Series
Pandas Series whose values are RGB triples and whose index contains
categorical labels.
horizontal : bool
If True, plot horizontally.
ax : matplotlib.axis
Axis to plot on.
Returns
-------
ax : matplotlib.axis
Plot axis.
"""
import matplotlib.pyplot as plt
import numpy as np
t = np.array([np.array([x for x in legend])])
if ax is None:
fig, ax = plt.subplots(1, 1)
if horizontal:
ax.imshow(t, interpolation='none')
ax.set_yticks([])
ax.set_xticks(np.arange(0, legend.shape[0]))
t = ax.set_xticklabels(legend.index)
else:
t = t.reshape([legend.shape[0], 1, 3])
ax.imshow(t, interpolation='none')
ax.set_xticks([])
ax.set_yticks(np.arange(0, legend.shape[0]))
t = ax.set_yticklabels(legend.index)
return ax
def make_color_legend_rects(colors, labels=None):
"""
Make list of rectangles and labels for making legends.
Parameters
----------
colors : pandas.Series or list
Pandas series whose values are colors and index is labels.
Alternatively, you can provide a list with colors and provide the labels
as a list.
labels : list
If colors is a list, this should be the list of corresponding labels.
Returns
-------
out : pd.Series
Pandas series whose values are matplotlib rectangles and whose index are
the legend labels for those rectangles. You can add each of these
rectangles to your axis using ax.add_patch(r) for r in out then create a
legend whose labels are out.values and whose labels are
legend_rects.index:
for r in legend_rects:
ax.add_patch(r)
lgd = ax.legend(legend_rects.values, labels=legend_rects.index)
"""
from matplotlib.pyplot import Rectangle
if labels:
d = dict(list(zip(labels, colors)))
se = pd.Series(d)
else:
se = colors
rects = []
for i in se.index:
r = Rectangle((0, 0), 0, 0, fc=se[i])
rects.append(r)
out = pd.Series(rects, index=se.index)
return out
class SVD:
def __init__(self, df, mean_center=True, scale_variance=False, full_matrices=False):
"""
Perform SVD for data matrix using scipy.linalg.svd. Note that this is currently inefficient
for large matrices due to some of the pandas operations.
Parameters
----------
df : pandas.DataFrame
Pandas data frame with data.
mean_center : bool
If True, mean center the rows. This should be done if not already
done.
scale_variance : bool
If True, scale the variance of each row to be one. Combined with
mean centering, this will transform your data into z-scores.
full_matrices : bool
Passed to scipy.linalg.svd. If True, U and Vh are of shape (M, M), (N, N). If False, the
shapes are (M, K) and (K, N), where K = min(M, N).
"""
import copy
self.data_orig = copy.deepcopy(df)
self.data = copy.deepcopy(df)
if mean_center:
self.data = (self.data.T - self.data.mean(axis=1)).T
if scale_variance:
self.data = (self.data.T / self.data.std(axis=1)).T
self._perform_svd(full_matrices)
def _perform_svd(self, full_matrices):
from scipy.linalg import svd
u, s, vh = svd(self.data, full_matrices=full_matrices)
self.u_orig = u
self.s_orig = s
self.vh_orig = vh
self.u = pd.DataFrame(
u,
index=self.data.index,
columns=['PC{}'.format(x) for x in range(1, u.shape[1] + 1)],
)
self.v = pd.DataFrame(
vh.T,
index=self.data.columns,
columns=['PC{}'.format(x) for x in range(1, vh.shape[0] + 1)],
)
index = ['PC{}'.format(x) for x in range(1, len(s) + 1)]
self.s_norm = pd.Series(s / s.sum(), index=index)
def plot_variance_explained(self, cumulative=False, xtick_start=1,
xtick_spacing=1, num_pc=None):
"""
Plot amount of variance explained by each principal component.
Parameters
----------
num_pc : int
Number of principal components to plot. If None, plot all.
cumulative : bool
If True, include cumulative variance.
xtick_start : int
The first principal component to label on the x-axis.
xtick_spacing : int
The spacing between labels on the x-axis.
"""
import matplotlib.pyplot as plt
from numpy import arange
if num_pc:
s_norm = self.s_norm[0:num_pc]
else:
s_norm = self.s_norm
if cumulative:
s_cumsum = s_norm.cumsum()
plt.bar(list(range(s_cumsum.shape[0])), s_cumsum.values,
label='Cumulative', color=(0.17254901960784313,
0.6274509803921569,
0.17254901960784313))
plt.bar(list(range(s_norm.shape[0])), s_norm.values, label='Per PC',
color=(0.12156862745098039, 0.4666666666666667,
0.7058823529411765))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylabel('Variance')
else:
plt.bar(list(range(s_norm.shape[0])), s_norm.values,
color=(0.12156862745098039, 0.4666666666666667,
0.7058823529411765))
plt.ylabel('Proportion variance explained')
plt.xlabel('PC')
plt.xlim(0, s_norm.shape[0])
tick_locs = arange(xtick_start - 1, s_norm.shape[0],
step=xtick_spacing)
# 0.8 is the width of the bars.
tick_locs = tick_locs + 0.4
plt.xticks(tick_locs,
arange(xtick_start, s_norm.shape[0] + 1, xtick_spacing))
def plot_pc_scatter(self, pc1, pc2, v=True, subset=None, ax=None,
color=None, s=None, marker=None, color_name=None,
s_name=None, marker_name=None):
"""
Make a scatter plot of two principal components. You can create
differently colored, sized, or marked scatter points.
Parameters
----------
pc1 : str
String of form PCX where X is the number of the principal component
you want to plot on the x-axis.
pc2 : str
String of form PCX where X is the number of the principal component
you want to plot on the y-axis.
v : bool
If True, use the v matrix for plotting the principal components
(typical if input data was genes as rows and samples as columns).
If False, use the u matrix.
subset : list
Make the scatter plot using only a subset of the rows of u or v.
ax : matplotlib.axes
Plot the scatter plot on this axis.
color : pandas.Series
Pandas series containing a categorical variable to color the scatter
points.
s : pandas.Series
Pandas series containing a categorical variable to size the scatter
points. Currently limited to 7 distinct values (sizes).
marker : pandas.Series
Pandas series containing a categorical variable to choose the marker
type for the scatter points. Currently limited to 21 distinct values
(marker styles).
color_name : str
Name for the color legend if a categorical variable for color is
provided.
s_name : str
Name for the size legend if a categorical variable for size is
provided.
marker_name : str
Name for the marker legend if a categorical variable for marker type
is provided.
Returns
-------
ax : matplotlib.axes._subplots.AxesSubplot
Scatter plot axis.
TODO: Add ability to label points.
"""
import matplotlib.pyplot as plt
import seaborn as sns
        if s is not None:
            assert len(set(s)) <= 7, 'Error: too many values for "s"'
if v:
df = self.v
else:
df = self.u
if color is not None:
if color.unique().shape[0] <= 10:
colormap = pd.Series(dict(list(zip(set(color.values),
tableau20[0:2 * len(set(color)):2]))))
else:
colormap = pd.Series(dict(list(zip(set(color.values),
sns.color_palette('husl', len(set(color)))))))
color = pd.Series([colormap[x] for x in color.values],
index=color.index)
color_legend = True
if not color_name:
color_name = color.index.name
else:
color = pd.Series([tableau20[0]] * df.shape[0], index=df.index)
color_legend = False
if s is not None:
smap = pd.Series(dict(list(zip(
set(s.values), list(range(30, 351))[0::50][0:len(set(s)) + 1]))))
s = pd.Series([smap[x] for x in s.values],
index=s.index)
s_legend = True
if not s_name:
s_name = s.index.name
else:
s = pd.Series(30, index=df.index)
s_legend = False
markers = ['o', '*', 's', 'v', '+', 'x', 'd',
'p', '2', '<', '|', '>', '_', 'h',
'1', '2', '3', '4', '8', '^', 'D']
if marker is not None:
markermap = pd.Series(dict(list(zip(set(marker.values), markers))))
marker = pd.Series([markermap[x] for x in marker.values],
index=marker.index)
marker_legend = True
if not marker_name:
marker_name = marker.index.name
else:
marker = pd.Series('o', index=df.index)
marker_legend = False
if ax is None:
fig, ax = plt.subplots(1, 1)
for m in set(marker.values):
mse = marker[marker == m]
cse = color[mse.index]
sse = s[mse.index]
ax.scatter(df.ix[mse.index, pc1], df.ix[mse.index, pc2],
s=sse.values, color=list(cse.values), marker=m,
alpha=0.8)
ax.set_title('{} vs. {}'.format(pc1, pc2))
ax.set_xlabel(pc1)
ax.set_ylabel(pc2)
        lgd = None
        if color_legend:
legend_rects = make_color_legend_rects(colormap)
for r in legend_rects:
ax.add_patch(r)
lgd = ax.legend(legend_rects.values, labels=legend_rects.index,
title=color_name,
loc='upper left',
bbox_to_anchor=(1, 1))
if s_legend:
if lgd:
lgd = ax.add_artist(lgd)
xa, xb = ax.get_xlim()
ya, yb = ax.get_ylim()
for i in smap.index:
ax.scatter([xb + 1], [yb + 1], marker='o',
s=smap[i], color='black', label=i)
lgd = ax.legend(title=s_name, loc='center left',
bbox_to_anchor=(1, 0.5))
ax.set_xlim(xa, xb)
ax.set_ylim(ya, yb)
if marker_legend:
if lgd:
lgd = ax.add_artist(lgd)
xa, xb = ax.get_xlim()
ya, yb = ax.get_ylim()
for i in markermap.index:
t = ax.scatter([xb + 1], [yb + 1], marker=markermap[i],
s=sse.min(), color='black', label=i)
handles, labels = ax.get_legend_handles_labels()
if s_legend:
handles = handles[len(smap):]
labels = labels[len(smap):]
lgd = ax.legend(handles, labels, title=marker_name,
loc='lower left', bbox_to_anchor=(1, 0))
ax.set_xlim(xa, xb)
ax.set_ylim(ya, yb)
# fig.tight_layout()
return fig, ax
def pc_correlation(self, covariates, num_pc=5):
"""
Calculate the correlation between the first num_pc prinicipal components
and known covariates. The size and index of covariates determines
whether u or v is used.
Parameters
----------
covariates : pandas.DataFrame
Dataframe of covariates whose index corresponds to the index of
either u or v.
num_pc : int
Number of principal components to correlate with.
Returns
-------
corr : pandas.Panel
Panel with correlation values and p-values.
"""
from scipy.stats import spearmanr
if (covariates.shape[0] == self.u.shape[0] and
len(set(covariates.index) & set(self.u.index)) == self.u.shape[0]):
mat = self.u
elif (covariates.shape[0] == self.v.shape[0] and
len(set(covariates.index) & set(self.v.index)) == self.v.shape[0]):
mat = self.v
else:
import sys
sys.stderr.write('Covariates differ in size from input data.\n')
sys.exit(1)
corr = pd.Panel(items=['rho', 'pvalue'],
major_axis=covariates.columns,
minor_axis=mat.columns[0:num_pc])
for i in corr.major_axis:
for j in corr.minor_axis:
rho, p = spearmanr(covariates[i], mat[j])
corr.ix['rho', i, j] = rho
corr.ix['pvalue', i, j] = p
return corr
def pc_anova(self, covariates, num_pc=5):
"""
Calculate one-way ANOVA between the first num_pc prinicipal components
and known covariates. The size and index of covariates determines
whether u or v is used.
Parameters
----------
covariates : pandas.DataFrame
Dataframe of covariates whose index corresponds to the index of
either u or v.
num_pc : int
Number of principal components to correlate with.
Returns
-------
anova : pandas.Panel
Panel with F-values and p-values.
"""
from scipy.stats import f_oneway
if (covariates.shape[0] == self.u.shape[0] and
len(set(covariates.index) & set(self.u.index)) == self.u.shape[0]):
mat = self.u
elif (covariates.shape[0] == self.v.shape[0] and
len(set(covariates.index) & set(self.v.index)) == self.v.shape[0]):
mat = self.v
anova = pd.Panel(items=['fvalue', 'pvalue'],
major_axis=covariates.columns,
minor_axis=mat.columns[0:num_pc])
for i in anova.major_axis:
for j in anova.minor_axis:
t = [mat[j][covariates[i] == x] for x in set(covariates[i])]
f, p = f_oneway(*t)
anova.ix['fvalue', i, j] = f
anova.ix['pvalue', i, j] = p
return anova
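# Hedged usage sketch on a small random genes x samples matrix; nothing below refers
# to real data, and matplotlib is assumed for the variance plot.
def _example_svd_usage():
    import numpy as np
    df = pd.DataFrame(np.random.randn(50, 8),
                      index=['gene_{}'.format(i) for i in range(50)],
                      columns=['sample_{}'.format(i) for i in range(8)])
    res = SVD(df, mean_center=True, scale_variance=True)
    res.plot_variance_explained(cumulative=True)
    return res.v[['PC1', 'PC2']]  # per-sample coordinates on the first two PCs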
def manhattan_plot(
res,
ax,
p_filter=1,
p_cutoff=None,
marker_size=10,
font_size=8,
chrom_labels=list(range(1, 23))[0::2],
label_column=None,
category_order=None,
legend=True,
):
"""
Make Manhattan plot for GWAS results. Currently only support autosomes.
Parameters
----------
res : pandas.DataFrame
GWAS results. The following columns are required - chrom (chromsome,
int), pos (genomic position, int), P (GWAS p-value, float).
ax : matplotlib.axis
Matplotlib axis to make Manhattan plot on.
p_filter : float
Only plot p-values smaller than this cutoff. This is useful for testing
because filtering on p-values speeds up the plotting.
p_cutoff : float
Plot horizontal line at this p-value.
marker_size : int
Size of Manhattan markers.
font_size : int
Font size for plots.
chrom_labels : list
List of ints indicating which chromsomes to label. You may want to
modulate this based on the size of the plot. Currently only integers
1-22 are supported.
label_column : str
String with column name from res. This column should contain a
categorical annotation for each variant. These will be indicated by
colors.
category_order : list
If label_column is not None, you can provide a list of the categories
that are contained in the label_column. This will be used to assign the
color palette and will specify the z-order of the categories.
legend : boolean
If True and label_column is not None, plot a legend.
Returns
-------
res : pandas.Dataframe
GWAS results. The results will have additional columns that were used
for plotting.
ax : matplotlib.axis
Axis with the Manhattan plot.
colors : pd.Series or None
If label_column is None, this will be None. Otherwise, if a label_column
is specified, this will be a series with a mapping between the labels
and the colors for each label.
"""
# TODO: It might make sense to allow a variable that specifies the z-order
# of labels in label_column. If there are many labels and points in the same
# place, certain annotations will be preferentially shown.
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# Filter results based on p-value.
if p_filter < 1:
res = res[res['P'] < p_filter]
# Assign x coordinates for each association.
res['xpos'] = np.nan
chrom_vc = res['chrom'].value_counts()
# total_length is arbitrary, but it's a little easier than working with the
# normalized chromosome sizes to avoid small numbers.
total_length = 1000
right = chrom_sizes_norm.cumsum()
right = right / right[22] * total_length
left = chrom_sizes_norm.cumsum() - chrom_sizes_norm[1]
left = pd.Series(0, list(range(1, 23)))
left[1:23] = right[0:21].values
for chrom in range(1, 23):
if chrom in res['chrom'].values:
res.loc[res['chrom'] == chrom, 'xpos'] = np.linspace(
left[chrom], right[chrom], chrom_vc[chrom])
# Assign colors.
grey = mpl.colors.to_rgb('grey')
light_grey = (0.9, 0.9, 0.9)
middle_grey = (0.8, 0.8, 0.8)
# I first set everything to black, but in the end everything should be
# changed to one of the greys (or other colors if there is an annotation
# column). If there are black points on the plot, that indicates a problem.
res['color'] = 'black'
for chrom in range(1, 23)[0::2]:
if chrom in res['chrom'].values:
ind = res[res.chrom == chrom].index
res.loc[ind, 'color'] = pd.Series([grey for x in ind], index=ind)
for chrom in range(1, 23)[1::2]:
if chrom in res['chrom'].values:
ind = res[res.chrom == chrom].index
res.loc[ind, 'color'] = pd.Series([middle_grey for x in ind], index=ind)
if label_column is not None:
if category_order is not None:
assert set(category_order) == set(res[label_column].dropna())
categories = category_order
else:
categories = list(set(res[label_column].dropna()))
colors = categories_to_colors(
categories,
colormap=sns.color_palette('colorblind'),
)
for cat in categories:
ind = res[res[label_column] == cat].index
res.loc[ind, 'color'] = pd.Series([colors[cat] for x in ind],
index=ind)
# Plot
if label_column is not None:
ind = res[res[label_column].isnull()].index
ax.scatter(
res.loc[ind, 'xpos'],
-np.log10(res.loc[ind, 'P']),
color=res.loc[ind, 'color'],
s=marker_size,
alpha=0.75,
rasterized=True,
label=None,
)
for cat in reversed(categories):
ind = res[res[label_column] == cat].index
ax.scatter(
res.loc[ind, 'xpos'],
-np.log10(res.loc[ind, 'P']),
color=res.loc[ind, 'color'],
s=marker_size,
alpha=0.75,
rasterized=True,
label=None,
)
else:
ax.scatter(
res['xpos'],
-np.log10(res['P']),
color=res['color'],
s=marker_size,
alpha=0.75,
rasterized=True,
label=None,
)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
ax.grid(axis='x')
ax.grid(axis='y')
ax.grid(axis='y', alpha=0.5, ls='-', lw=0.6)
if p_cutoff is not None:
ax.hlines(
-np.log10(p_cutoff),
-5,
total_length + 5,
color='red',
linestyles='--',
lw=0.8,
alpha=0.5,
)
# These next two lines add background shading. I may add back in as option.
# for chrom in range(1, 23)[0::2]:
# ax.axvspan(left[chrom], right[chrom], facecolor=(0.4, 0.4, 0.4), alpha=0.2, lw=0)
ax.set_xlim(-5, total_length + 5)
ax.set_ylim(0, ymax)
# Set chromosome labels
# ind = range(1, 23)[0::2]
# if skip19:
# ind = [x for x in ind if x != 19]
ind = [x for x in chrom_labels if x in range(1, 23)]
ax.set_xticks(left[ind] + (right[ind] - left[ind]) / 2)
ax.set_xticklabels(ind, fontsize=font_size)
    ax.set_ylabel(r'$-\log_{10} p$ value', fontsize=font_size)
for t in ax.get_xticklabels() + ax.get_yticklabels():
t.set_fontsize(font_size)
if label_column is not None and legend:
for cat in categories:
ax.scatter(
-100,
-100,
s=marker_size,
color=colors[cat],
label=cat,
)
if legend:
ax.legend(
fontsize=font_size- 1,
framealpha=0.5,
frameon=True,
facecolor='white',
)
# TODO: eventually, it would be better to be smarter about the x-axis
# limits. Depending on the size of the markers and plot, some of the markers
# might be cut off.
ax.set_xlim(-5, total_length + 5)
# TODO: eventually, it would be better to be smarter about the y-axis
# limits. Depending on the size of the markers and plot, some of the markers
# might be cut off. Matplotlib doesn't know anything about the size of the
# markers, so it might set the y-limit too low.
ax.set_ylim(-1 * np.log10(p_filter), ymax)
if label_column is None:
colors = None
return(res, ax, colors)
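# Hedged usage sketch: `res` is assumed to be a GWAS results frame with integer
# `chrom`, `pos` and float `P` columns as described in the docstring; the p-value
# cutoff shown is the conventional genome-wide significance threshold.
def _example_manhattan(res):
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(figsize=(8, 3))
    res, ax, colors = manhattan_plot(res, ax, p_filter=0.01, p_cutoff=5e-8)
    return fig, ax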
|
the-stack_0_13269 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: GPL-3.0
#
# GNU Radio Python Flow Graph
# Title: NOAA APT Decoder
# Author: Manolis Surligas, George Vardakis
# Description: A NOAA APT Decoder with automatic image synchronization
# GNU Radio version: 3.8.1.0
from gnuradio import analog
from gnuradio import blocks
from gnuradio import filter
from gnuradio.filter import firdes
from gnuradio import gr
import sys
import signal
from argparse import ArgumentParser
from gnuradio.eng_arg import eng_float, intx
from gnuradio import eng_notation
from gnuradio.filter import pfb
import satnogs
import soapy
import distutils
from distutils import util
class satnogs_noaa_apt_decoder(gr.top_block):
def __init__(self, antenna="RX", bb_freq=0.0, bw=0.0, dc_removal="False", decoded_data_file_path="/tmp/.satnogs/data/data", dev_args="", doppler_correction_per_sec=20, enable_iq_dump=0, file_path="test.wav", flip_images=0, gain=0.0, gain_mode="Overall", iq_file_path="/tmp/iq.dat", lo_offset=100e3, other_settings="", ppm=0, rigctl_port=4532, rx_freq=100e6, samp_rate_rx=2048000, soapy_rx_device="driver=rtlsdr", stream_args="", sync=1, tune_args="", udp_IP="127.0.0.1", udp_dump_host="", udp_dump_port=57356, udp_port=16887, waterfall_file_path="/tmp/waterfall.dat"):
gr.top_block.__init__(self, "NOAA APT Decoder")
##################################################
# Parameters
##################################################
self.antenna = antenna
self.bb_freq = bb_freq
self.bw = bw
self.dc_removal = dc_removal
self.decoded_data_file_path = decoded_data_file_path
self.dev_args = dev_args
self.doppler_correction_per_sec = doppler_correction_per_sec
self.enable_iq_dump = enable_iq_dump
self.file_path = file_path
self.flip_images = flip_images
self.gain = gain
self.gain_mode = gain_mode
self.iq_file_path = iq_file_path
self.lo_offset = lo_offset
self.other_settings = other_settings
self.ppm = ppm
self.rigctl_port = rigctl_port
self.rx_freq = rx_freq
self.samp_rate_rx = samp_rate_rx
self.soapy_rx_device = soapy_rx_device
self.stream_args = stream_args
self.sync = sync
self.tune_args = tune_args
self.udp_IP = udp_IP
self.udp_dump_host = udp_dump_host
self.udp_dump_port = udp_dump_port
self.udp_port = udp_port
self.waterfall_file_path = waterfall_file_path
##################################################
# Variables
##################################################
self.audio_samp_rate = audio_samp_rate = 48000
##################################################
# Blocks
##################################################
self.soapy_source_0_0 = None
# Make sure that the gain mode is valid
if(gain_mode not in ['Overall', 'Specific', 'Settings Field']):
raise ValueError("Wrong gain mode on channel 0. Allowed gain modes: "
"['Overall', 'Specific', 'Settings Field']")
dev = soapy_rx_device
# Stream arguments for every activated stream
tune_args = [tune_args]
settings = [other_settings]
# Setup the device arguments
dev_args = dev_args
self.soapy_source_0_0 = soapy.source(1, dev, dev_args, stream_args,
tune_args, settings, samp_rate_rx, "fc32")
self.soapy_source_0_0.set_dc_removal(0,bool(distutils.util.strtobool(dc_removal)))
# Set up DC offset. If set to (0, 0) internally the source block
# will handle the case if no DC offset correction is supported
self.soapy_source_0_0.set_dc_offset(0,0)
# Setup IQ Balance. If set to (0, 0) internally the source block
# will handle the case if no IQ balance correction is supported
self.soapy_source_0_0.set_iq_balance(0,0)
self.soapy_source_0_0.set_agc(0,False)
# generic frequency setting should be specified first
self.soapy_source_0_0.set_frequency(0, rx_freq - lo_offset)
self.soapy_source_0_0.set_frequency(0,"BB",bb_freq)
# Setup Frequency correction. If set to 0 internally the source block
# will handle the case if no frequency correction is supported
self.soapy_source_0_0.set_frequency_correction(0,ppm)
self.soapy_source_0_0.set_antenna(0,antenna)
self.soapy_source_0_0.set_bandwidth(0,bw)
if(gain_mode != 'Settings Field'):
            # pass is needed, in case the template does not evaluate anything
pass
self.soapy_source_0_0.set_gain(0,gain)
self.satnogs_waterfall_sink_0_0 = satnogs.waterfall_sink(4*4160*4, rx_freq, 10, 1024, waterfall_file_path, 1)
self.satnogs_ogg_encoder_0 = satnogs.ogg_encoder(file_path, audio_samp_rate, 0.8)
self.satnogs_noaa_apt_sink_1 = satnogs.noaa_apt_sink(decoded_data_file_path, 2080, 1800, True, False)
self.satnogs_iq_sink_0_0 = satnogs.iq_sink(16768, iq_file_path, False, enable_iq_dump)
self.satnogs_doppler_compensation_0 = satnogs.doppler_compensation(samp_rate_rx, rx_freq, lo_offset, 4*4160*4, 1, 0)
self.rational_resampler_xxx_0_0 = filter.rational_resampler_fff(
interpolation=1,
decimation=4,
taps=None,
fractional_bw=None)
self.pfb_arb_resampler_xxx_0 = pfb.arb_resampler_fff(
audio_samp_rate / (4*4160*4),
taps=None,
flt_size=32)
self.pfb_arb_resampler_xxx_0.declare_sample_delay(0)
self.low_pass_filter_0_0 = filter.fir_filter_ccf(
1,
firdes.low_pass(
1,
4*4160*4,
4*4160*1.1,
1e3,
firdes.WIN_HAMMING,
6.76))
self.hilbert_fc_0 = filter.hilbert_fc(65, firdes.WIN_HAMMING, 6.76)
self.blocks_udp_sink_0_0 = blocks.udp_sink(gr.sizeof_gr_complex*1, udp_dump_host, udp_dump_port, 1472, True)
self.blocks_complex_to_mag_0 = blocks.complex_to_mag(1)
self.band_pass_filter_0 = filter.fir_filter_fff(
4,
firdes.band_pass(
1,
(4*4160*4 ),
500,
4.2e3,
200,
firdes.WIN_HAMMING,
6.76))
self.analog_wfm_rcv_0 = analog.wfm_rcv(
quad_rate=4*4160*4,
audio_decimation=1,
)
##################################################
# Connections
##################################################
self.connect((self.analog_wfm_rcv_0, 0), (self.band_pass_filter_0, 0))
self.connect((self.analog_wfm_rcv_0, 0), (self.pfb_arb_resampler_xxx_0, 0))
self.connect((self.band_pass_filter_0, 0), (self.hilbert_fc_0, 0))
self.connect((self.blocks_complex_to_mag_0, 0), (self.rational_resampler_xxx_0_0, 0))
self.connect((self.hilbert_fc_0, 0), (self.blocks_complex_to_mag_0, 0))
self.connect((self.low_pass_filter_0_0, 0), (self.analog_wfm_rcv_0, 0))
self.connect((self.pfb_arb_resampler_xxx_0, 0), (self.satnogs_ogg_encoder_0, 0))
self.connect((self.rational_resampler_xxx_0_0, 0), (self.satnogs_noaa_apt_sink_1, 0))
self.connect((self.satnogs_doppler_compensation_0, 0), (self.blocks_udp_sink_0_0, 0))
self.connect((self.satnogs_doppler_compensation_0, 0), (self.low_pass_filter_0_0, 0))
self.connect((self.satnogs_doppler_compensation_0, 0), (self.satnogs_iq_sink_0_0, 0))
self.connect((self.satnogs_doppler_compensation_0, 0), (self.satnogs_waterfall_sink_0_0, 0))
self.connect((self.soapy_source_0_0, 0), (self.satnogs_doppler_compensation_0, 0))
def get_antenna(self):
return self.antenna
def set_antenna(self, antenna):
self.antenna = antenna
self.soapy_source_0_0.set_antenna(0,self.antenna)
def get_bb_freq(self):
return self.bb_freq
def set_bb_freq(self, bb_freq):
self.bb_freq = bb_freq
self.soapy_source_0_0.set_frequency(0,"BB",self.bb_freq)
def get_bw(self):
return self.bw
def set_bw(self, bw):
self.bw = bw
self.soapy_source_0_0.set_bandwidth(0,self.bw)
def get_dc_removal(self):
return self.dc_removal
def set_dc_removal(self, dc_removal):
self.dc_removal = dc_removal
self.soapy_source_0_0.set_dc_removal(0,bool(distutils.util.strtobool(self.dc_removal)))
def get_decoded_data_file_path(self):
return self.decoded_data_file_path
def set_decoded_data_file_path(self, decoded_data_file_path):
self.decoded_data_file_path = decoded_data_file_path
def get_dev_args(self):
return self.dev_args
def set_dev_args(self, dev_args):
self.dev_args = dev_args
def get_doppler_correction_per_sec(self):
return self.doppler_correction_per_sec
def set_doppler_correction_per_sec(self, doppler_correction_per_sec):
self.doppler_correction_per_sec = doppler_correction_per_sec
def get_enable_iq_dump(self):
return self.enable_iq_dump
def set_enable_iq_dump(self, enable_iq_dump):
self.enable_iq_dump = enable_iq_dump
def get_file_path(self):
return self.file_path
def set_file_path(self, file_path):
self.file_path = file_path
def get_flip_images(self):
return self.flip_images
def set_flip_images(self, flip_images):
self.flip_images = flip_images
def get_gain(self):
return self.gain
def set_gain(self, gain):
self.gain = gain
self.soapy_source_0_0.set_gain(0, self.gain)
def get_gain_mode(self):
return self.gain_mode
def set_gain_mode(self, gain_mode):
self.gain_mode = gain_mode
def get_iq_file_path(self):
return self.iq_file_path
def set_iq_file_path(self, iq_file_path):
self.iq_file_path = iq_file_path
def get_lo_offset(self):
return self.lo_offset
def set_lo_offset(self, lo_offset):
self.lo_offset = lo_offset
self.soapy_source_0_0.set_frequency(0, self.rx_freq - self.lo_offset)
def get_other_settings(self):
return self.other_settings
def set_other_settings(self, other_settings):
self.other_settings = other_settings
def get_ppm(self):
return self.ppm
def set_ppm(self, ppm):
self.ppm = ppm
self.soapy_source_0_0.set_frequency_correction(0,self.ppm)
def get_rigctl_port(self):
return self.rigctl_port
def set_rigctl_port(self, rigctl_port):
self.rigctl_port = rigctl_port
def get_rx_freq(self):
return self.rx_freq
def set_rx_freq(self, rx_freq):
self.rx_freq = rx_freq
self.soapy_source_0_0.set_frequency(0, self.rx_freq - self.lo_offset)
def get_samp_rate_rx(self):
return self.samp_rate_rx
def set_samp_rate_rx(self, samp_rate_rx):
self.samp_rate_rx = samp_rate_rx
def get_soapy_rx_device(self):
return self.soapy_rx_device
def set_soapy_rx_device(self, soapy_rx_device):
self.soapy_rx_device = soapy_rx_device
def get_stream_args(self):
return self.stream_args
def set_stream_args(self, stream_args):
self.stream_args = stream_args
def get_sync(self):
return self.sync
def set_sync(self, sync):
self.sync = sync
def get_tune_args(self):
return self.tune_args
def set_tune_args(self, tune_args):
self.tune_args = tune_args
def get_udp_IP(self):
return self.udp_IP
def set_udp_IP(self, udp_IP):
self.udp_IP = udp_IP
def get_udp_dump_host(self):
return self.udp_dump_host
def set_udp_dump_host(self, udp_dump_host):
self.udp_dump_host = udp_dump_host
def get_udp_dump_port(self):
return self.udp_dump_port
def set_udp_dump_port(self, udp_dump_port):
self.udp_dump_port = udp_dump_port
def get_udp_port(self):
return self.udp_port
def set_udp_port(self, udp_port):
self.udp_port = udp_port
def get_waterfall_file_path(self):
return self.waterfall_file_path
def set_waterfall_file_path(self, waterfall_file_path):
self.waterfall_file_path = waterfall_file_path
def get_audio_samp_rate(self):
return self.audio_samp_rate
def set_audio_samp_rate(self, audio_samp_rate):
self.audio_samp_rate = audio_samp_rate
self.pfb_arb_resampler_xxx_0.set_rate(self.audio_samp_rate / (4*4160*4))
def argument_parser():
description = 'A NOAA APT Decoder with automatic image synchronization'
parser = ArgumentParser(description=description)
parser.add_argument(
"--antenna", dest="antenna", type=str, default="RX",
help="Set antenna [default=%(default)r]")
parser.add_argument(
"--bb-freq", dest="bb_freq", type=eng_float, default="0.0",
help="Set Baseband CORDIC frequency (if the device supports it) [default=%(default)r]")
parser.add_argument(
"--bw", dest="bw", type=eng_float, default="0.0",
help="Set Bandwidth [default=%(default)r]")
parser.add_argument(
"--dc-removal", dest="dc_removal", type=str, default="False",
help="Set Remove automatically the DC offset (if the device support it) [default=%(default)r]")
parser.add_argument(
"--decoded-data-file-path", dest="decoded_data_file_path", type=str, default="/tmp/.satnogs/data/data",
help="Set decoded_data_file_path [default=%(default)r]")
parser.add_argument(
"--dev-args", dest="dev_args", type=str, default="",
help="Set Device arguments [default=%(default)r]")
parser.add_argument(
"--doppler-correction-per-sec", dest="doppler_correction_per_sec", type=intx, default=20,
help="Set doppler_correction_per_sec [default=%(default)r]")
parser.add_argument(
"--enable-iq-dump", dest="enable_iq_dump", type=intx, default=0,
help="Set enable_iq_dump [default=%(default)r]")
parser.add_argument(
"--file-path", dest="file_path", type=str, default="test.wav",
help="Set file_path [default=%(default)r]")
parser.add_argument(
"--flip-images", dest="flip_images", type=intx, default=0,
help="Set flip_images [default=%(default)r]")
parser.add_argument(
"--gain", dest="gain", type=eng_float, default="0.0",
help="Set gain [default=%(default)r]")
parser.add_argument(
"--gain-mode", dest="gain_mode", type=str, default="Overall",
help="Set gain_mode [default=%(default)r]")
parser.add_argument(
"--iq-file-path", dest="iq_file_path", type=str, default="/tmp/iq.dat",
help="Set iq_file_path [default=%(default)r]")
parser.add_argument(
"--lo-offset", dest="lo_offset", type=eng_float, default="100.0k",
help="Set lo_offset [default=%(default)r]")
parser.add_argument(
"--other-settings", dest="other_settings", type=str, default="",
help="Set Soapy Channel other settings [default=%(default)r]")
parser.add_argument(
"--ppm", dest="ppm", type=eng_float, default="0.0",
help="Set ppm [default=%(default)r]")
parser.add_argument(
"--rigctl-port", dest="rigctl_port", type=intx, default=4532,
help="Set rigctl_port [default=%(default)r]")
parser.add_argument(
"--rx-freq", dest="rx_freq", type=eng_float, default="100.0M",
help="Set rx_freq [default=%(default)r]")
parser.add_argument(
"--samp-rate-rx", dest="samp_rate_rx", type=eng_float, default="2.048M",
help="Set Device Sampling rate [default=%(default)r]")
parser.add_argument(
"--soapy-rx-device", dest="soapy_rx_device", type=str, default="driver=rtlsdr",
help="Set soapy_rx_device [default=%(default)r]")
parser.add_argument(
"--stream-args", dest="stream_args", type=str, default="",
help="Set Soapy Stream arguments [default=%(default)r]")
parser.add_argument(
"--sync", dest="sync", type=intx, default=1,
help="Set sync [default=%(default)r]")
parser.add_argument(
"--tune-args", dest="tune_args", type=str, default="",
help="Set Soapy Channel Tune arguments [default=%(default)r]")
parser.add_argument(
"--udp-IP", dest="udp_IP", type=str, default="127.0.0.1",
help="Set udp_IP [default=%(default)r]")
parser.add_argument(
"--udp-dump-host", dest="udp_dump_host", type=str, default="",
help="Set udp_dump_host [default=%(default)r]")
parser.add_argument(
"--udp-dump-port", dest="udp_dump_port", type=intx, default=57356,
help="Set udp_dump_port [default=%(default)r]")
parser.add_argument(
"--udp-port", dest="udp_port", type=intx, default=16887,
help="Set udp_port [default=%(default)r]")
parser.add_argument(
"--waterfall-file-path", dest="waterfall_file_path", type=str, default="/tmp/waterfall.dat",
help="Set waterfall_file_path [default=%(default)r]")
return parser
def main(top_block_cls=satnogs_noaa_apt_decoder, options=None):
if options is None:
options = argument_parser().parse_args()
tb = top_block_cls(antenna=options.antenna, bb_freq=options.bb_freq, bw=options.bw, dc_removal=options.dc_removal, decoded_data_file_path=options.decoded_data_file_path, dev_args=options.dev_args, doppler_correction_per_sec=options.doppler_correction_per_sec, enable_iq_dump=options.enable_iq_dump, file_path=options.file_path, flip_images=options.flip_images, gain=options.gain, gain_mode=options.gain_mode, iq_file_path=options.iq_file_path, lo_offset=options.lo_offset, other_settings=options.other_settings, ppm=options.ppm, rigctl_port=options.rigctl_port, rx_freq=options.rx_freq, samp_rate_rx=options.samp_rate_rx, soapy_rx_device=options.soapy_rx_device, stream_args=options.stream_args, sync=options.sync, tune_args=options.tune_args, udp_IP=options.udp_IP, udp_dump_host=options.udp_dump_host, udp_dump_port=options.udp_dump_port, udp_port=options.udp_port, waterfall_file_path=options.waterfall_file_path)
def sig_handler(sig=None, frame=None):
tb.stop()
tb.wait()
sys.exit(0)
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
tb.start()
tb.wait()
if __name__ == '__main__':
main()
|
the-stack_0_13273 | import collections
import functools
import glob
import ntpath
import os
import random
import re
import subprocess
import sys
import tempfile
from collections import Counter
from pathlib import Path
from urllib.request import Request, urlopen
import fastnumbers
import humanize
import numpy as np
import pandas as pd
import six
from fastnumbers import isint, isfloat
from string_grouper import match_strings
from optimus import ROOT_DIR
from optimus.engines import functions as F # Used in eval
from optimus.helpers.check import is_url
from optimus.helpers.columns import parse_columns
from optimus.helpers.converter import any_dataframe_to_pandas
from optimus.helpers.core import val_to_list, one_list_to_val
from optimus.helpers.logger import logger
from optimus.helpers.raiseit import RaiseIt
from optimus.infer import is_
F = F # To do not remove the import accidentally when using pycharm auto clean import feature
def random_int(n=5):
"""
Create a random string of ints
:return:
"""
return str(random.randint(1, 10 ** n))
def collect_as_list(df):
return df.rdd.flatMap(lambda x: x).collect()
def collect_as_dict(df, limit=None):
"""
    Return a list of ordered dicts (one per row) from a dataframe collect result:
    [{col_name: row_value, col_name_1: row_value_1, ...}, ...]
:return:
"""
dict_result = []
df = any_dataframe_to_pandas(df)
# if there is only an element in the dict just return the value
if len(dict_result) == 1:
dict_result = next(iter(dict_result.values()))
else:
col_names = parse_columns(df, "*")
# Because asDict can return messed columns names we order
for index, row in df.iterrows():
# _row = row.asDict()
r = collections.OrderedDict()
# for col_name, value in row.iteritems():
for col_name in col_names:
r[col_name] = row[col_name]
dict_result.append(r)
return dict_result
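# Illustrative result shape (column names here are hypothetical, not from real data):
#   collect_as_dict(df)  # df with columns "name" and "age"
#   # -> [OrderedDict([('name', 'alice'), ('age', 30)]),
#   #     OrderedDict([('name', 'bob'), ('age', 25)])]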
# def collect_as_dict(df, limit=None):
# """
# Return a dict from a Collect result
# :param df:
# :return:
# """
# # # Explore this approach seems faster
# # use_unicode = True
# # from pyspark.serializers import UTF8Deserializer
# # from pyspark.rdd import RDD
# # rdd = df._jdf.toJSON()
# # r = RDD(rdd.toJavaRDD(), df._sc, UTF8Deserializer(use_unicode))
# # if limit is None:
# # r.collect()
# # else:
# # r.take(limit)
# # return r
# #
# from optimus.helpers.columns import parse_columns
# dict_result = []
#
# # if there is only an element in the dict just return the value
# if len(dict_result) == 1:
# dict_result = next(iter(dict_result.values()))
# else:
# col_names = parse_columns(df, "*")
#
# # Because asDict can return messed columns names we order
# for row in df.collect():
# _row = row.asDict()
# r = collections.OrderedDict()
# for col in col_names:
# r[col] = _row[col]
# dict_result.append(r)
# return dict_result
def filter_list(val, index=0):
"""
Convert a list to None, int, str or a list filtering a specific index
[] to None
    [['test']] to 'test'
:param val:
:param index:
:return:
"""
if len(val) == 0:
return None
else:
return one_list_to_val([column[index] for column in val])
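# Illustrative calls, assuming one_list_to_val() collapses a single-element list to its
# value and returns longer lists unchanged:
#   filter_list([])                            # -> None
#   filter_list([("col_a", 1)])                # -> "col_a"
#   filter_list([("col_a", 1), ("col_b", 2)])  # -> ["col_a", "col_b"]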
def absolute_path(files, format="posix"):
"""
    Use the project base folder to construct an absolute path
:param files: path files
:param format: posix or uri
:return:
"""
files = val_to_list(files)
result = None
if format == "uri":
result = [Path(ROOT_DIR + file).as_uri() for file in files]
elif format == "posix":
result = [Path(ROOT_DIR + file).as_posix() for file in files]
else:
RaiseIt.value_error(format, ["posix", "uri"])
result = one_list_to_val(result)
return result
def format_path(path, format="posix"):
"""
    Format a path depending on the operating system
:param path:
:param format:
:return:
"""
if format == "uri":
result = Path(path).as_uri()
elif format == "posix":
result = Path(path).as_posix()
return result
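# Illustrative calls on a POSIX system (exact output is platform dependent):
#   format_path("/tmp/data.csv", "posix")  # -> "/tmp/data.csv"
#   format_path("/tmp/data.csv", "uri")    # -> "file:///tmp/data.csv"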
def java_version():
    version = subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT).decode()
    pattern = r'\"(\d+\.\d+).*\"'
    print(re.search(pattern, version).groups()[0])
def setup_google_colab():
"""
Check if we are in Google Colab and setup it up
:return:
"""
from optimus.helpers.constants import JAVA_PATH_COLAB
from optimus.engines.spark.constants import SPARK_PATH_COLAB
from optimus.engines.spark.constants import SPARK_URL
from optimus.engines.spark.constants import SPARK_FILE
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
if not os.path.isdir(JAVA_PATH_COLAB) or not os.path.isdir(SPARK_PATH_COLAB):
print("Installing Optimus, Java8 and Spark. It could take 3 min...")
commands = [
"apt-get install openjdk-8-jdk-headless -qq > /dev/null",
"wget -q {SPARK_URL}".format(SPARK_URL=SPARK_URL),
"tar xf {SPARK_FILE}".format(SPARK_FILE=SPARK_FILE)
]
cmd = " && ".join(commands)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
p_stdout = p.stdout.read().decode("ascii")
p_stderr = p.stderr.read().decode("ascii")
print(p_stdout, p_stderr)
else:
print("Settings env vars")
# Always configure the env vars
os.environ["JAVA_HOME"] = JAVA_PATH_COLAB
os.environ["SPARK_HOME"] = SPARK_PATH_COLAB
def is_pyarrow_installed():
"""
Check if pyarrow is installed
:return:
"""
try:
import pyarrow
have_arrow = True
except ImportError:
have_arrow = False
return have_arrow
def check_env_vars(env_vars):
"""
    Check if the given environment variables exist and print their values
    :param env_vars: list of environment variable names
:return:
"""
for env_var in env_vars:
if env_var in os.environ:
logger.print(env_var + "=" + os.environ.get(env_var))
else:
logger.print(env_var + " is not set")
# Reference https://nvie.com/posts/modifying-deeply-nested-structures/
def ellipsis(data, length=20):
"""
Add a "..." if a string y greater than a specific length
:param data:
:param length: length taking into account to cut the string
:return:
"""
data = str(data)
return (data[:length] + '..') if len(data) > length else data
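# Illustrative calls:
#   ellipsis("short", 20)                      # -> "short"
#   ellipsis("a very long string indeed", 10)  # -> "a very lon.."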
def create_buckets(lower_bound, upper_bound, bins):
"""
Create a dictionary with bins
:param lower_bound: low range
:param upper_bound: high range
:param bins: number of buckets
:return:
"""
range_value = (upper_bound - lower_bound) / bins
low = lower_bound
buckets = []
if bins == 1:
buckets.append({"lower": low, "upper": low + 1, "bucket": 0})
else:
for i in range(0, bins):
high = low + range_value
buckets.append({"lower": low, "upper": high, "bucket": i})
low = high
# Ensure that the upper bound is exactly the higher value.
# Because floating point calculation it can miss the upper bound in the final sum
buckets[bins - 1]["upper"] = upper_bound
return buckets
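# Illustrative call:
#   create_buckets(0, 10, 2)
#   # -> [{'lower': 0, 'upper': 5.0, 'bucket': 0},
#   #     {'lower': 5.0, 'upper': 10, 'bucket': 1}]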
def deep_sort(obj):
"""
    Recursively sort nested lists inside a list or dict
"""
if isinstance(obj, dict):
_sorted = {}
for key in sorted(obj):
_sorted[key] = deep_sort(obj[key])
elif isinstance(obj, list):
new_list = []
for val in obj:
new_list.append(deep_sort(val))
_sorted = sorted(new_list)
else:
_sorted = obj
return _sorted
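# Illustrative call:
#   deep_sort({"b": [3, 1, 2], "a": {"y": 2, "x": 1}})
#   # -> {'a': {'x': 1, 'y': 2}, 'b': [1, 2, 3]}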
def infer_dataframes_keys(df_left: pd.DataFrame, df_right: pd.DataFrame):
"""
Infer the possible key columns in two data frames
:param df_left:
:param df_right:
:return:
"""
result = []
df_left = df_left.dropna().astype(str)
df_right = df_right.dropna().astype(str)
    # Search for column names with an *id* substring
def check_ids_columns(_df):
return [x for x in _df.columns if re.search(r"_id| id|id_| id ", x)]
ids_columns_left = check_ids_columns(df_left)
ids_columns_right = check_ids_columns(df_right)
if len(ids_columns_left) == len(ids_columns_right):
for i, j in zip(ids_columns_left, ids_columns_right):
result.append((i, j,))
# Numeric median len
def min_max_len(_df):
df_is_int = _df.applymap(lambda value: fastnumbers.isint(value)).sum()
df_is_int = df_is_int[df_is_int == len(_df)]
int_columns_names = df_is_int.index.values
int_columns_df = _df[int_columns_names]
string_len = int_columns_df.applymap(lambda value: len(value))
return (int_columns_names, string_len.min().values, string_len.max().values)
min_max_df_left = min_max_len(df_left)
min_max_df_right = min_max_len(df_right)
def median_len(arr, idx):
"""
Calculate median len of the columns string
:param arr:
:param idx:
:return:
"""
_min = arr[1][idx]
_max = arr[2][idx]
if _min != _max:
_median = _max - _min
else:
_median = _max
return _median
for i, col_l in enumerate(min_max_df_left[0]):
median_left = median_len(min_max_df_left, i)
for j, col_r in enumerate(min_max_df_right[0]):
median_right = median_len(min_max_df_right, j)
if median_left == median_right:
result.append((col_l, col_r,))
# String Clustering
for col_l in df_left:
for col_r in df_right:
try:
m = match_strings(df_left[col_l], df_right[col_r], min_similarity=0.05)
if len(m) > 0:
result.append((col_l, col_r,))
except ValueError:
pass
# Count tuples
return [(count,) + item for item, count in Counter(result).items()]
def update_dict(d, u):
"""
Update only the given keys
:param d:
:param u:
:return:
"""
# python 3.8+ compatibility
try:
collectionsAbc = collections.abc
except ModuleNotFoundError:
collectionsAbc = collections
for k, v in six.iteritems(u):
dv = d.get(k, {})
if not isinstance(dv, collectionsAbc.Mapping):
d[k] = v
elif isinstance(v, collectionsAbc.Mapping):
d[k] = update_dict(dv, v)
else:
d[k] = v
return d
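# Illustrative call (only the given nested keys are overwritten):
#   update_dict({"a": {"x": 1, "y": 2}, "b": 3}, {"a": {"y": 20}})
#   # -> {'a': {'x': 1, 'y': 20}, 'b': 3}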
def reduce_mem_usage(df, categorical=True, categorical_threshold=50, verbose=False):
"""
Change the columns datatypes to reduce the memory usage. Also identify
:param df:
:param categorical:
:param categorical_threshold:
:param verbose:
:return:
"""
# Reference https://www.kaggle.com/arjanso/reducing-dataframe-memory-size-by-65/notebook
start_mem_usg = df.ext.size()
ints = df.applymap(isint).sum().compute().to_dict()
floats = df.applymap(isfloat).sum().compute().to_dict()
nulls = df.isnull().sum().compute().to_dict()
total_rows = len(df)
columns_dtype = {}
for x, y in ints.items():
if ints[x] == nulls[x]:
dtype = "object"
elif floats[x] == total_rows:
dtype = "numerical"
elif total_rows <= ints[x] + nulls[x]:
dtype = "numerical"
else:
dtype = "object"
columns_dtype[x] = dtype
numerical_int = [col for col, dtype in columns_dtype.items() if dtype == "numerical"]
final = {}
if len(numerical_int) > 0:
min_max = df.cols.range(numerical_int)
import numpy as np
for col_name in min_max.keys():
_min = min_max[col_name]["min"]
_max = min_max[col_name]["max"]
if _min >= 0:
if _max < 255:
final[col_name] = np.uint8
elif _max < 65535:
final[col_name] = np.uint16
elif _max < 4294967295:
final[col_name] = np.uint32
else:
final[col_name] = np.uint64
else:
if _min > np.iinfo(np.int8).min and _max < np.iinfo(np.int8).max:
final[col_name] = np.int8
elif _min > np.iinfo(np.int16).min and _max < np.iinfo(np.int16).max:
final[col_name] = np.int16
elif _min > np.iinfo(np.int32).min and _max < np.iinfo(np.int32).max:
final[col_name] = np.int32
elif _min > np.iinfo(np.int64).min and _max < np.iinfo(np.int64).max:
final[col_name] = np.int64
# print(final[col_name])
object_int = [col for col, dtype in columns_dtype.items() if dtype == "object"]
if len(object_int) > 0:
count_values = df.cols.value_counts(object_int)
# if categorical is True:
# for col_name in object_int:
# if len(count_values[col_name]) <= categorical_threshold:
# final[col_name] = "category"
df = df.astype(final)
mem_usg = df.ext.size()
if verbose is True:
print("Memory usage after optimization:", humanize.naturalsize(start_mem_usg))
print("Memory usage before optimization is: ", humanize.naturalsize(mem_usg))
print(round(100 * mem_usg / start_mem_usg), "% of the initial size")
return df
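# Illustrative usage, assuming `df` is an Optimus Dask dataframe (the function relies on
# .ext.size(), .cols.range() and .compute()):
#   df = reduce_mem_usage(df, verbose=True)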
def downloader(url, file_format):
"""
Send the request to download a file
"""
def write_file(response, file, chunk_size=8192):
"""
Load the data from the http request and save it to disk
:param response: data returned from the server
:param file:
:param chunk_size: size chunk size of the data
:return:
"""
total_size = response.headers['Content-Length'].strip() if 'Content-Length' in response.headers else 100
total_size = int(total_size)
bytes_so_far = 0
while 1:
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not chunk:
break
file.write(chunk)
total_size = bytes_so_far if bytes_so_far > total_size else total_size
return bytes_so_far
# try to infer the file format using the file extension
if file_format is None:
filename, file_format = os.path.splitext(url)
file_format = file_format.replace('.', '')
i = url.rfind('/')
data_name = url[(i + 1):]
headers = {"User-Agent": "Optimus Data Downloader/1.0"}
req = Request(url, None, headers)
logger.print("Downloading %s from %s", data_name, url)
# It seems that avro need a .avro extension file
with tempfile.NamedTemporaryFile(suffix="." + file_format, delete=False) as f:
bytes_downloaded = write_file(urlopen(req), f)
path = f.name
if bytes_downloaded > 0:
logger.print("Downloaded %s bytes", bytes_downloaded)
logger.print("Creating DataFrame for %s. Please wait...", data_name)
return path
@functools.lru_cache(maxsize=128)
def prepare_path(path, file_format=None):
"""d
Helper to return the file to be loaded and the file name.
This will memoise
:param path: Path to the file to be loaded
:param file_format: format file
:return:
"""
r = []
if is_url(path):
file = downloader(path, file_format)
file_name = ntpath.basename(path)
r = [(file, file_name,)]
else:
for file_name in glob.glob(path, recursive=True):
r.append((file_name, ntpath.basename(file_name),))
if len(r) == 0:
raise Exception("File not found")
return r
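# Illustrative call, assuming data/a.csv and data/b.csv exist (a pattern with no match
# raises "File not found"; repeated calls are served from the lru_cache):
#   prepare_path("data/*.csv")
#   # -> [("data/a.csv", "a.csv"), ("data/b.csv", "b.csv")]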
def set_func(pdf, value, where, output_col, parser, default=None):
"""
Core implementation of the set function
:param pdf:
:param value:
:param where:
:param output_col:
:param parser:
:param default:
:return:
"""
col_names = list(filter(lambda x: x != "__match__", pdf.cols.names()))
profiler_dtype_to_python = {"decimal": "float", "int": "int", "string": "str", "datetime": "datetime",
"bool": "bool", "zip_code": "str"}
df = pdf.cols.cast(col_names, profiler_dtype_to_python[parser])
try:
if where is None:
return eval(value)
else:
# Reference https://stackoverflow.com/questions/33769860/pandas-apply-but-only-for-rows-where-a-condition-is-met
mask = (eval(where))
if (output_col not in pdf.cols.names()) and (default is not None):
pdf[output_col] = pdf[default]
pdf.loc[mask, output_col] = eval(value)
return pdf[output_col]
except (ValueError, TypeError) as e:
logger.print(e)
# raise
return np.nan
def set_function_parser(df, value, where, default=None):
"""
    Infer the data type that must be used to calculate the result of the set function
    :param df:
    :param value:
    :param where:
    :param default:
    :return:
"""
value = str(value)
where = str(where)
def prepare_columns(cols):
"""
Extract the columns names from the value and where params
:param cols:
:return:
"""
if cols is not None:
r = val_to_list([f_col[1:len(f_col) - 1] for f_col in
re.findall(r"(df\['[A-Za-z0-9_ -]*'\])", cols.replace("\"", "'"))])
result = [re.findall(r"'([^']*)'", i)[0] for i in r]
else:
result = []
return result
if default is None:
default = []
    # include the default column(s) when collecting the referenced columns
columns = prepare_columns(value) + prepare_columns(where) + val_to_list(default)
columns = list(set(columns))
if columns:
first_columns = columns[0]
column_dtype = df.cols.infer_profiler_dtypes(first_columns)[first_columns]["dtype"]
else:
if fastnumbers.fast_int(value):
column_dtype = "int"
elif fastnumbers.fast_float(value):
column_dtype = "decimal"
else:
column_dtype = "string"
# if column_dtype in PROFILER_NUMERIC_DTYPES:
# func = lambda x: fastnumbers.fast_float(x) if x is not None else None
# elif column_dtype in PROFILER_STRING_DTYPES or column_dtype is None:
# func = lambda x: str(x) if not pd.isnull(x) else None
return columns, column_dtype
# value = "dd/MM/yyyy hh:mm:ss-sss MA"
def match_date(value):
"""
Returns Create a regex from a string with a date format
:param value:
:return:
"""
formats = ["d", "dd", "M", "MM", "yy", "yyyy", "h", "hh", "H", "HH", "kk", "k", "m", "mm", "s", "ss", "sss", "/",
":", "-", " ", "+", "|", "mi"]
formats.sort(key=len, reverse=True)
result = []
start = 0
end = len(value)
found = False
while start < end:
found = False
for f in formats:
if value.startswith(f, start):
start = start + len(f)
result.append(f)
found = True
break
if found is False:
raise ValueError('{} is not a valid date format'.format(value[start]))
exprs = []
for f in result:
# Separators
if f in ["/", ":", "-", " ", "|", "+", " "]:
exprs.append("\\" + f)
# elif f == ":":
# exprs.append("\\:")
# elif f == "-":
# exprs.append("\\-")
# elif f == " ":
# exprs.append(" ")
# elif f == "|":
# exprs.append("\\|")
# elif f == "+":
# exprs.append("\\+")
# Day
# d -> 1 ... 31
# dd -> 01 ... 31
elif f == "d":
exprs.append("(3[01]|[12][0-9]|0?[1-9])")
elif f == "dd":
exprs.append("(3[01]|[12][0-9]|0[1-9])")
# Month
# M -> 1 ... 12
# MM -> 01 ... 12
elif f == "M":
exprs.append("(1[0-2]|0?[1-9])")
elif f == "MM":
exprs.append("(1[0-2]|0[1-9])")
# Year
# yy -> 00 ... 99
# yyyy -> 0000 ... 9999
elif f == "yy":
exprs.append("[0-9]{2}")
elif f == "yyyy":
exprs.append("[0-9]{4}")
# Hours
# h -> 1,2 ... 12
# hh -> 01,02 ... 12
# H -> 0,1 ... 23
# HH -> 00,01 ... 23
# k -> 1,2 ... 24
# kk -> 01,02 ... 24
elif f == "h":
exprs.append("(1[0-2]|0?[1-9])")
elif f == "hh":
exprs.append("(1[0-2]|0[1-9])")
elif f == "H":
exprs.append("(0?[0-9]|1[0-9]|2[0-3]|[0-9])")
elif f == "HH":
exprs.append("(0[0-9]|1[0-9]|2[0-3]|[0-9])")
elif f == "k":
exprs.append("(0?[1-9]|1[0-9]|2[0-4]|[1-9])")
elif f == "kk":
exprs.append("(0[1-9]|1[0-9]|2[0-4])")
# Minutes
# m -> 0 ... 59
# mm -> 00 .. 59
elif f == "m":
exprs.append("[1-5]?[0-9]")
elif f == "mm":
exprs.append("[0-5][0-9]")
# Seconds
# s -> 0 ... 59
# ss -> 00 .. 59
elif f == "s":
exprs.append("[1-5]?[0-9]")
elif f == "ss":
exprs.append("[0-5][0-9]")
# Milliseconds
# sss -> 0 ... 999
elif f == "sss":
exprs.append("[0-9]{3}")
# Extras
# mi -> Meridian indicator (AM am Am) (PM pm Pm) (m M)
elif f == "mi":
exprs.append("([AaPp][Mm]|[Mm]).?")
return "".join(exprs)
# print("^" + match_date(value) + "$")
def ipython_vars(globals_vars, dtype=None):
"""
Return the list of data frames depending on the type
:param globals_vars: globals() from the notebook
:param dtype: 'pandas', 'cudf', 'dask' or 'dask_cudf'
:return:
"""
tmp = globals_vars.copy()
vars = [(k, v, type(v)) for k, v in tmp.items() if
not k.startswith('_') and k != 'tmp' and k != 'In' and k != 'Out' and not hasattr(v, '__call__')]
if dtype == "dask_cudf":
from dask_cudf.core import DataFrame as DaskCUDFDataFrame
_dtype = DaskCUDFDataFrame
elif dtype == "cudf":
from cudf.core import DataFrame as CUDFDataFrame
_dtype = CUDFDataFrame
elif dtype == "dask":
from dask.dataframe.core import DataFrame
_dtype = DataFrame
elif dtype == "pandas":
import pandas as pd
PandasDataFrame = pd.DataFrame
_dtype = PandasDataFrame
return [name for name, instance, aa in vars if is_(instance, _dtype)]
# Taken from https://github.com/Kemaweyan/singleton_decorator/
class _SingletonWrapper:
"""
    A singleton wrapper class. One wrapper instance is created
    for each decorated class.
"""
def __init__(self, cls):
self.__wrapped__ = cls
self._instance = None
def __call__(self, *args, **kwargs):
"""Returns a single instance of decorated class"""
if self._instance is None:
self._instance = self.__wrapped__(*args, **kwargs)
return self._instance
def singleton(cls):
"""
    A singleton decorator. Returns a wrapper object. A call on that object
    returns a single instance of the decorated class. Use the __wrapped__
    attribute to access the decorated class directly in unit tests.
"""
return _SingletonWrapper(cls)
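# Illustrative usage (class name is made up for the example):
#   @singleton
#   class AppConfig:
#       def __init__(self):
#           self.value = 42
#
#   AppConfig() is AppConfig()  # True: every call returns the same wrapped instance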
|