{
"source": "jmgao/meson",
"score": 2
}
#### File: mesonbuild/dependencies/base.py
```python
import copy
import functools
import os
import re
import stat
import json
import shlex
import shutil
import textwrap
import platform
from enum import Enum
from pathlib import PurePath
from .. import mlog
from .. import mesonlib
from ..compilers import clib_langs
from ..mesonlib import MesonException, OrderedSet
from ..mesonlib import Popen_safe, version_compare_many, version_compare, listify
# These must be defined in this file to avoid cyclical references.
packages = {}
_packages_accept_language = set()
class DependencyException(MesonException):
'''Exceptions raised while trying to find dependencies'''
class DependencyMethods(Enum):
# Auto means to use whatever dependency checking mechanisms in whatever order meson thinks is best.
AUTO = 'auto'
PKGCONFIG = 'pkg-config'
QMAKE = 'qmake'
# Just specify the standard link arguments, assuming the operating system provides the library.
SYSTEM = 'system'
# This is only supported on OSX - search the frameworks directory by name.
EXTRAFRAMEWORK = 'extraframework'
# Detect using the sysconfig module.
SYSCONFIG = 'sysconfig'
# Specify using a "program"-config style tool
CONFIG_TOOL = 'config-tool'
# For backwards compatibility
SDLCONFIG = 'sdlconfig'
CUPSCONFIG = 'cups-config'
PCAPCONFIG = 'pcap-config'
LIBWMFCONFIG = 'libwmf-config'
# Misc
DUB = 'dub'
class Dependency:
@classmethod
def _process_method_kw(cls, kwargs):
method = kwargs.get('method', 'auto')
if method not in [e.value for e in DependencyMethods]:
raise DependencyException('method {!r} is invalid'.format(method))
method = DependencyMethods(method)
        # This sets per-tool config methods which are deprecated to the new
# generic CONFIG_TOOL value.
if method in [DependencyMethods.SDLCONFIG, DependencyMethods.CUPSCONFIG,
DependencyMethods.PCAPCONFIG, DependencyMethods.LIBWMFCONFIG]:
mlog.warning(textwrap.dedent("""\
Configuration method {} has been deprecated in favor of
'config-tool'. This will be removed in a future version of
meson.""".format(method)))
method = DependencyMethods.CONFIG_TOOL
# Set the detection method. If the method is set to auto, use any available method.
# If method is set to a specific string, allow only that detection method.
if method == DependencyMethods.AUTO:
methods = cls.get_methods()
elif method in cls.get_methods():
methods = [method]
else:
raise DependencyException(
'Unsupported detection method: {}, allowed methods are {}'.format(
method.value,
mlog.format_list([x.value for x in [DependencyMethods.AUTO] + cls.get_methods()])))
return methods
def __init__(self, type_name, kwargs):
self.name = "null"
self.version = None
self.language = None # None means C-like
self.is_found = False
self.type_name = type_name
self.compile_args = []
self.link_args = []
# Raw -L and -l arguments without manual library searching
# If None, self.link_args will be used
self.raw_link_args = None
self.sources = []
self.methods = self._process_method_kw(kwargs)
def __repr__(self):
s = '<{0} {1}: {2}>'
return s.format(self.__class__.__name__, self.name, self.is_found)
def get_compile_args(self):
return self.compile_args
def get_link_args(self, raw=False):
if raw and self.raw_link_args is not None:
return self.raw_link_args
return self.link_args
def found(self):
return self.is_found
def get_sources(self):
"""Source files that need to be added to the target.
As an example, gtest-all.cc when using GTest."""
return self.sources
@staticmethod
def get_methods():
return [DependencyMethods.AUTO]
def get_name(self):
return self.name
def get_version(self):
if self.version:
return self.version
else:
return 'unknown'
def get_exe_args(self, compiler):
return []
def need_openmp(self):
return False
def need_threads(self):
return False
def get_pkgconfig_variable(self, variable_name, kwargs):
raise DependencyException('{!r} is not a pkgconfig dependency'.format(self.name))
def get_configtool_variable(self, variable_name):
raise DependencyException('{!r} is not a config-tool dependency'.format(self.name))
def get_partial_dependency(self, *, compile_args=False, link_args=False,
links=False, includes=False, sources=False):
"""Create a new dependency that contains part of the parent dependency.
The following options can be inherited:
            links -- all link_with arguments
includes -- all include_directory and -I/-isystem calls
sources -- any source, header, or generated sources
compile_args -- any compile args
link_args -- any link args
        Additionally the new dependency will have the version parameter of its
parent (if any) and the requested values of any dependencies will be
added as well.
"""
        raise RuntimeError('Unreachable code in partial_dependency called')
class InternalDependency(Dependency):
def __init__(self, version, incdirs, compile_args, link_args, libraries, whole_libraries, sources, ext_deps):
super().__init__('internal', {})
self.version = version
self.is_found = True
self.include_directories = incdirs
self.compile_args = compile_args
self.link_args = link_args
self.libraries = libraries
self.whole_libraries = whole_libraries
self.sources = sources
self.ext_deps = ext_deps
def get_pkgconfig_variable(self, variable_name, kwargs):
raise DependencyException('Method "get_pkgconfig_variable()" is '
'invalid for an internal dependency')
def get_configtool_variable(self, variable_name):
raise DependencyException('Method "get_configtool_variable()" is '
'invalid for an internal dependency')
def get_partial_dependency(self, *, compile_args=False, link_args=False,
links=False, includes=False, sources=False):
compile_args = self.compile_args.copy() if compile_args else []
link_args = self.link_args.copy() if link_args else []
libraries = self.libraries.copy() if links else []
whole_libraries = self.whole_libraries.copy() if links else []
sources = self.sources.copy() if sources else []
includes = self.include_directories.copy() if includes else []
deps = [d.get_partial_dependency(
compile_args=compile_args, link_args=link_args, links=links,
includes=includes, sources=sources) for d in self.ext_deps]
return InternalDependency(
self.version, includes, compile_args, link_args, libraries,
whole_libraries, sources, deps)
class ExternalDependency(Dependency):
def __init__(self, type_name, environment, language, kwargs):
super().__init__(type_name, kwargs)
self.env = environment
self.name = type_name # default
self.is_found = False
self.language = language
self.version_reqs = kwargs.get('version', None)
if isinstance(self.version_reqs, str):
self.version_reqs = [self.version_reqs]
self.required = kwargs.get('required', True)
self.silent = kwargs.get('silent', False)
self.static = kwargs.get('static', False)
if not isinstance(self.static, bool):
raise DependencyException('Static keyword must be boolean')
# Is this dependency for cross-compilation?
if 'native' in kwargs and self.env.is_cross_build():
self.want_cross = not kwargs['native']
else:
self.want_cross = self.env.is_cross_build()
self.clib_compiler = None
# Set the compiler that will be used by this dependency
# This is only used for configuration checks
if self.want_cross:
compilers = self.env.coredata.cross_compilers
else:
compilers = self.env.coredata.compilers
# Set the compiler for this dependency if a language is specified,
# else try to pick something that looks usable.
if self.language:
if self.language not in compilers:
m = self.name.capitalize() + ' requires a {0} compiler, but ' \
'{0} is not in the list of project languages'
raise DependencyException(m.format(self.language.capitalize()))
self.clib_compiler = compilers[self.language]
else:
# Try to find a compiler that can find C libraries for
# running compiler.find_library()
for lang in clib_langs:
self.clib_compiler = compilers.get(lang, None)
if self.clib_compiler:
break
def get_compiler(self):
return self.clib_compiler
def get_partial_dependency(self, *, compile_args=False, link_args=False,
links=False, includes=False, sources=False):
new = copy.copy(self)
if not compile_args:
new.compile_args = []
if not link_args:
new.link_args = []
if not sources:
new.sources = []
return new
def log_details(self):
return ''
def log_info(self):
return ''
def log_tried(self):
return ''
# Check if dependency version meets the requirements
def _check_version(self):
if not self.is_found:
return
if self.version_reqs:
# an unknown version can never satisfy any requirement
if not self.version:
found_msg = ['Dependency', mlog.bold(self.name), 'found:']
found_msg += [mlog.red('NO'), 'unknown version, but need:',
self.version_reqs]
mlog.log(*found_msg)
if self.required:
m = 'Unknown version of dependency {!r}, but need {!r}.'
raise DependencyException(m.format(self.name, self.version_reqs))
else:
(self.is_found, not_found, found) = \
version_compare_many(self.version, self.version_reqs)
if not self.is_found:
found_msg = ['Dependency', mlog.bold(self.name), 'found:']
found_msg += [mlog.red('NO'),
'found {!r} but need:'.format(self.version),
', '.join(["'{}'".format(e) for e in not_found])]
if found:
found_msg += ['; matched:',
', '.join(["'{}'".format(e) for e in found])]
mlog.log(*found_msg)
if self.required:
m = 'Invalid version of dependency, need {!r} {!r} found {!r}.'
raise DependencyException(m.format(self.name, not_found, self.version))
return
class NotFoundDependency(Dependency):
def __init__(self, environment):
super().__init__('not-found', {})
self.env = environment
self.name = 'not-found'
self.is_found = False
class ConfigToolDependency(ExternalDependency):
"""Class representing dependencies found using a config tool."""
tools = None
tool_name = None
__strip_version = re.compile(r'^[0-9.]*')
def __init__(self, name, environment, language, kwargs):
super().__init__('config-tool', environment, language, kwargs)
self.name = name
self.native = kwargs.get('native', False)
self.tools = listify(kwargs.get('tools', self.tools))
req_version = kwargs.get('version', None)
tool, version = self.find_config(req_version)
self.config = tool
self.is_found = self.report_config(version, req_version)
if not self.is_found:
self.config = None
return
self.version = version
if getattr(self, 'finish_init', None):
self.finish_init(self)
def _sanitize_version(self, version):
"""Remove any non-numeric, non-point version suffixes."""
m = self.__strip_version.match(version)
if m:
# Ensure that there isn't a trailing '.', such as an input like
# `1.2.3.git-1234`
return m.group(0).rstrip('.')
return version
@classmethod
def factory(cls, name, environment, language, kwargs, tools, tool_name, finish_init=None):
"""Constructor for use in dependencies that can be found multiple ways.
In addition to the standard constructor values, this constructor sets
the tool_name and tools values of the instance.
"""
# This deserves some explanation, because metaprogramming is hard.
# This uses type() to create a dynamic subclass of ConfigToolDependency
        # with the tools and tool_name class attributes set; this class is then
# instantiated and returned. The reduce function (method) is also
# attached, since python's pickle module won't be able to do anything
# with this dynamically generated class otherwise.
def reduce(self):
return (cls._unpickle, (), self.__dict__)
sub = type('{}Dependency'.format(name.capitalize()), (cls, ),
{'tools': tools, 'tool_name': tool_name, '__reduce__': reduce, 'finish_init': staticmethod(finish_init)})
return sub(name, environment, language, kwargs)
@classmethod
def _unpickle(cls):
return cls.__new__(cls)
def find_config(self, versions=None):
"""Helper method that searchs for config tool binaries in PATH and
returns the one that best matches the given version requirements.
"""
if not isinstance(versions, list) and versions is not None:
versions = listify(versions)
if self.env.is_cross_build() and not self.native:
cross_file = self.env.cross_info.config['binaries']
try:
tools = [cross_file[self.tool_name]]
except KeyError:
mlog.warning('No entry for {0} specified in your cross file. '
'Falling back to searching PATH. This may find a '
'native version of {0}!'.format(self.tool_name))
tools = self.tools
else:
tools = self.tools
best_match = (None, None)
for tool in tools:
try:
p, out = Popen_safe([tool, '--version'])[:2]
except (FileNotFoundError, PermissionError):
continue
if p.returncode != 0:
continue
out = self._sanitize_version(out.strip())
            # Some tools, like pcap-config, don't supply a version but also
            # don't fail with --version; in that case just assume that there is
            # only one version and return it.
if not out:
return (tool, None)
if versions:
is_found = version_compare_many(out, versions)[0]
# This allows returning a found version without a config tool,
# which is useful to inform the user that you found version x,
# but y was required.
if not is_found:
tool = None
if best_match[1]:
if version_compare(out, '> {}'.format(best_match[1])):
best_match = (tool, out)
else:
best_match = (tool, out)
return best_match
def report_config(self, version, req_version):
"""Helper method to print messages about the tool."""
if self.config is None:
if version is not None:
mlog.log('Found', mlog.bold(self.tool_name), repr(version),
mlog.red('NO'), '(needed', req_version, ')')
else:
mlog.log('Found', mlog.bold(self.tool_name), repr(req_version),
mlog.red('NO'))
return False
mlog.log('Found {}:'.format(self.tool_name), mlog.bold(shutil.which(self.config)),
'({})'.format(version))
return True
def get_config_value(self, args, stage):
p, out, err = Popen_safe([self.config] + args)
# This is required to keep shlex from stripping path separators on
# Windows. Also, don't put escape sequences in config values, okay?
out = out.replace('\\', '\\\\')
if p.returncode != 0:
if self.required:
raise DependencyException(
'Could not generate {} for {}.\n{}'.format(
stage, self.name, err))
return []
return shlex.split(out)
@staticmethod
def get_methods():
return [DependencyMethods.AUTO, DependencyMethods.CONFIG_TOOL]
def get_configtool_variable(self, variable_name):
p, out, _ = Popen_safe([self.config, '--{}'.format(variable_name)])
if p.returncode != 0:
if self.required:
raise DependencyException(
'Could not get variable "{}" for dependency {}'.format(
variable_name, self.name))
variable = out.strip()
mlog.debug('Got config-tool variable {} : {}'.format(variable_name, variable))
return variable
def log_tried(self):
return self.type_name
class PkgConfigDependency(ExternalDependency):
# The class's copy of the pkg-config path. Avoids having to search for it
# multiple times in the same Meson invocation.
class_pkgbin = None
# We cache all pkg-config subprocess invocations to avoid redundant calls
pkgbin_cache = {}
def __init__(self, name, environment, kwargs, language=None):
super().__init__('pkgconfig', environment, language, kwargs)
self.name = name
self.is_libtool = False
# Store a copy of the pkg-config path on the object itself so it is
# stored in the pickled coredata and recovered.
self.pkgbin = None
# When finding dependencies for cross-compiling, we don't care about
# the 'native' pkg-config
if self.want_cross:
if 'pkgconfig' not in environment.cross_info.config['binaries']:
if self.required:
raise DependencyException('Pkg-config binary missing from cross file')
else:
potential_pkgbin = ExternalProgram.from_cross_info(environment.cross_info, 'pkgconfig')
if potential_pkgbin.found():
self.pkgbin = potential_pkgbin
PkgConfigDependency.class_pkgbin = self.pkgbin
else:
mlog.debug('Cross pkg-config %s not found.' % potential_pkgbin.name)
# Only search for the native pkg-config the first time and
# store the result in the class definition
elif PkgConfigDependency.class_pkgbin is None:
self.pkgbin = self.check_pkgconfig()
PkgConfigDependency.class_pkgbin = self.pkgbin
else:
self.pkgbin = PkgConfigDependency.class_pkgbin
if not self.pkgbin:
if self.required:
raise DependencyException('Pkg-config not found.')
return
mlog.debug('Determining dependency {!r} with pkg-config executable '
'{!r}'.format(name, self.pkgbin.get_path()))
ret, self.version = self._call_pkgbin(['--modversion', name])
if ret != 0:
return
try:
# Fetch cargs to be used while using this dependency
self._set_cargs()
# Fetch the libraries and library paths needed for using this
self._set_libs()
except DependencyException as e:
if self.required:
raise
else:
self.compile_args = []
self.link_args = []
self.is_found = False
                self.reason = e
                return
self.is_found = True
def __repr__(self):
s = '<{0} {1}: {2} {3}>'
return s.format(self.__class__.__name__, self.name, self.is_found,
self.version_reqs)
def _call_pkgbin_real(self, args, env):
cmd = self.pkgbin.get_command() + args
p, out = Popen_safe(cmd, env=env)[0:2]
rc, out = p.returncode, out.strip()
call = ' '.join(cmd)
mlog.debug("Called `{}` -> {}\n{}".format(call, rc, out))
return rc, out
def _call_pkgbin(self, args, env=None):
if env is None:
fenv = env
env = os.environ
else:
fenv = frozenset(env.items())
targs = tuple(args)
cache = PkgConfigDependency.pkgbin_cache
if (self.pkgbin, targs, fenv) not in cache:
cache[(self.pkgbin, targs, fenv)] = self._call_pkgbin_real(args, env)
return cache[(self.pkgbin, targs, fenv)]
def _convert_mingw_paths(self, args):
'''
Both MSVC and native Python on Windows cannot handle MinGW-esque /c/foo
paths so convert them to C:/foo. We cannot resolve other paths starting
with / like /home/foo so leave them as-is so that the user gets an
error/warning from the compiler/linker.
'''
if not mesonlib.is_windows():
return args
converted = []
for arg in args:
pargs = []
# Library search path
if arg.startswith('-L/'):
pargs = PurePath(arg[2:]).parts
tmpl = '-L{}:/{}'
elif arg.startswith('-I/'):
pargs = PurePath(arg[2:]).parts
tmpl = '-I{}:/{}'
# Full path to library or .la file
elif arg.startswith('/'):
pargs = PurePath(arg).parts
tmpl = '{}:/{}'
if len(pargs) > 1 and len(pargs[1]) == 1:
arg = tmpl.format(pargs[1], '/'.join(pargs[2:]))
converted.append(arg)
return converted
def _set_cargs(self):
env = None
if self.language == 'fortran':
# gfortran doesn't appear to look in system paths for INCLUDE files,
# so don't allow pkg-config to suppress -I flags for system paths
env = os.environ.copy()
env['PKG_CONFIG_ALLOW_SYSTEM_CFLAGS'] = '1'
ret, out = self._call_pkgbin(['--cflags', self.name], env=env)
if ret != 0:
raise DependencyException('Could not generate cargs for %s:\n\n%s' %
(self.name, out))
self.compile_args = self._convert_mingw_paths(shlex.split(out))
def _search_libs(self, out, out_raw):
'''
@out: PKG_CONFIG_ALLOW_SYSTEM_LIBS=1 pkg-config --libs
@out_raw: pkg-config --libs
We always look for the file ourselves instead of depending on the
compiler to find it with -lfoo or foo.lib (if possible) because:
1. We want to be able to select static or shared
2. We need the full path of the library to calculate RPATH values
3. De-dup of libraries is easier when we have absolute paths
Libraries that are provided by the toolchain or are not found by
find_library() will be added with -L -l pairs.
'''
# Library paths should be safe to de-dup
#
# First, figure out what library paths to use. Originally, we were
# doing this as part of the loop, but due to differences in the order
# of -L values between pkg-config and pkgconf, we need to do that as
# a separate step. See:
# https://github.com/mesonbuild/meson/issues/3951
# https://github.com/mesonbuild/meson/issues/4023
#
# Separate system and prefix paths, and ensure that prefix paths are
# always searched first.
prefix_libpaths = OrderedSet()
# We also store this raw_link_args on the object later
raw_link_args = self._convert_mingw_paths(shlex.split(out_raw))
for arg in raw_link_args:
if arg.startswith('-L') and not arg.startswith(('-L-l', '-L-L')):
prefix_libpaths.add(arg[2:])
system_libpaths = OrderedSet()
full_args = self._convert_mingw_paths(shlex.split(out))
for arg in full_args:
if arg.startswith(('-L-l', '-L-L')):
# These are D language arguments, not library paths
continue
if arg.startswith('-L') and arg[2:] not in prefix_libpaths:
system_libpaths.add(arg[2:])
# Use this re-ordered path list for library resolution
libpaths = list(prefix_libpaths) + list(system_libpaths)
# Track -lfoo libraries to avoid duplicate work
libs_found = OrderedSet()
# Track not-found libraries to know whether to add library paths
libs_notfound = []
libtype = 'static' if self.static else 'default'
# Generate link arguments for this library
link_args = []
for lib in full_args:
if lib.startswith(('-L-l', '-L-L')):
# These are D language arguments, add them as-is
pass
elif lib.startswith('-L'):
# We already handled library paths above
continue
elif lib.startswith('-l'):
# Don't resolve the same -lfoo argument again
if lib in libs_found:
continue
if self.clib_compiler:
args = self.clib_compiler.find_library(lib[2:], self.env,
libpaths, libtype)
# If the project only uses a non-clib language such as D, Rust,
# C#, Python, etc, all we can do is limp along by adding the
# arguments as-is and then adding the libpaths at the end.
else:
args = None
if args is not None:
libs_found.add(lib)
# Replace -l arg with full path to library if available
# else, library is either to be ignored, or is provided by
# the compiler, can't be resolved, and should be used as-is
if args:
if not args[0].startswith('-l'):
lib = args[0]
else:
continue
else:
# Library wasn't found, maybe we're looking in the wrong
# places or the library will be provided with LDFLAGS or
# LIBRARY_PATH from the environment (on macOS), and many
# other edge cases that we can't account for.
#
# Add all -L paths and use it as -lfoo
if lib in libs_notfound:
continue
if self.static:
mlog.warning('Static library {!r} not found for dependency {!r}, may '
'not be statically linked'.format(lib[2:], self.name))
libs_notfound.append(lib)
elif lib.endswith(".la"):
shared_libname = self.extract_libtool_shlib(lib)
shared_lib = os.path.join(os.path.dirname(lib), shared_libname)
if not os.path.exists(shared_lib):
shared_lib = os.path.join(os.path.dirname(lib), ".libs", shared_libname)
if not os.path.exists(shared_lib):
                    raise DependencyException('Got a libtool specific "%s" dependency, '
                                              'but we could not compute the actual shared '
                                              'library path' % lib)
self.is_libtool = True
lib = shared_lib
if lib in link_args:
continue
link_args.append(lib)
# Add all -Lbar args if we have -lfoo args in link_args
if libs_notfound:
# Order of -L flags doesn't matter with ld, but it might with other
# linkers such as MSVC, so prepend them.
link_args = ['-L' + lp for lp in prefix_libpaths] + link_args
return link_args, raw_link_args
def _set_libs(self):
env = None
libcmd = [self.name, '--libs']
if self.static:
libcmd.append('--static')
# Force pkg-config to output -L fields even if they are system
# paths so we can do manual searching with cc.find_library() later.
env = os.environ.copy()
env['PKG_CONFIG_ALLOW_SYSTEM_LIBS'] = '1'
ret, out = self._call_pkgbin(libcmd, env=env)
if ret != 0:
raise DependencyException('Could not generate libs for %s:\n\n%s' %
(self.name, out))
# Also get the 'raw' output without -Lfoo system paths for adding -L
# args with -lfoo when a library can't be found, and also in
# gnome.generate_gir + gnome.gtkdoc which need -L -l arguments.
ret, out_raw = self._call_pkgbin(libcmd)
if ret != 0:
raise DependencyException('Could not generate libs for %s:\n\n%s' %
(self.name, out_raw))
self.link_args, self.raw_link_args = self._search_libs(out, out_raw)
def get_pkgconfig_variable(self, variable_name, kwargs):
options = ['--variable=' + variable_name, self.name]
if 'define_variable' in kwargs:
definition = kwargs.get('define_variable', [])
if not isinstance(definition, list):
raise MesonException('define_variable takes a list')
if len(definition) != 2 or not all(isinstance(i, str) for i in definition):
raise MesonException('define_variable must be made up of 2 strings for VARIABLENAME and VARIABLEVALUE')
options = ['--define-variable=' + '='.join(definition)] + options
ret, out = self._call_pkgbin(options)
variable = ''
if ret != 0:
if self.required:
raise DependencyException('dependency %s not found.' %
(self.name))
else:
variable = out.strip()
# pkg-config doesn't distinguish between empty and non-existent variables
# use the variable list to check for variable existence
if not variable:
ret, out = self._call_pkgbin(['--print-variables', self.name])
if not re.search(r'^' + variable_name + r'$', out, re.MULTILINE):
if 'default' in kwargs:
variable = kwargs['default']
else:
mlog.warning("pkgconfig variable '%s' not defined for dependency %s." % (variable_name, self.name))
mlog.debug('Got pkgconfig variable %s : %s' % (variable_name, variable))
return variable
@staticmethod
def get_methods():
return [DependencyMethods.PKGCONFIG]
def check_pkgconfig(self):
evar = 'PKG_CONFIG'
if evar in os.environ:
pkgbin = os.environ[evar].strip()
else:
pkgbin = 'pkg-config'
pkgbin = ExternalProgram(pkgbin, silent=True)
if pkgbin.found():
try:
p, out = Popen_safe(pkgbin.get_command() + ['--version'])[0:2]
if p.returncode != 0:
mlog.warning('Found pkg-config {!r} but couldn\'t run it'
''.format(' '.join(pkgbin.get_command())))
# Set to False instead of None to signify that we've already
# searched for it and not found it
pkgbin = False
except (FileNotFoundError, PermissionError):
pkgbin = False
else:
pkgbin = False
if not self.silent:
if pkgbin:
mlog.log('Found pkg-config:', mlog.bold(pkgbin.get_path()),
'(%s)' % out.strip())
else:
mlog.log('Found Pkg-config:', mlog.red('NO'))
return pkgbin
def extract_field(self, la_file, fieldname):
with open(la_file) as f:
for line in f:
arr = line.strip().split('=')
if arr[0] == fieldname:
return arr[1][1:-1]
return None
def extract_dlname_field(self, la_file):
return self.extract_field(la_file, 'dlname')
def extract_libdir_field(self, la_file):
return self.extract_field(la_file, 'libdir')
def extract_libtool_shlib(self, la_file):
'''
Returns the path to the shared library
corresponding to this .la file
'''
dlname = self.extract_dlname_field(la_file)
if dlname is None:
return None
# Darwin uses absolute paths where possible; since the libtool files never
# contain absolute paths, use the libdir field
if mesonlib.is_osx():
dlbasename = os.path.basename(dlname)
libdir = self.extract_libdir_field(la_file)
if libdir is None:
return dlbasename
return os.path.join(libdir, dlbasename)
# From the comments in extract_libtool(), older libtools had
# a path rather than the raw dlname
return os.path.basename(dlname)
def log_tried(self):
return self.type_name
class DubDependency(ExternalDependency):
class_dubbin = None
def __init__(self, name, environment, kwargs):
super().__init__('dub', environment, 'd', kwargs)
self.name = name
self.compiler = super().get_compiler()
self.module_path = None
if 'required' in kwargs:
self.required = kwargs.get('required')
if DubDependency.class_dubbin is None:
self.dubbin = self._check_dub()
DubDependency.class_dubbin = self.dubbin
else:
self.dubbin = DubDependency.class_dubbin
if not self.dubbin:
if self.required:
raise DependencyException('DUB not found.')
self.is_found = False
return
mlog.debug('Determining dependency {!r} with DUB executable '
'{!r}'.format(name, self.dubbin.get_path()))
# we need to know the target architecture
arch = self.compiler.arch
# Ask dub for the package
ret, res = self._call_dubbin(['describe', name, '--arch=' + arch])
if ret != 0:
self.is_found = False
return
comp = self.compiler.get_id().replace('llvm', 'ldc').replace('gcc', 'gdc')
packages = []
description = json.loads(res)
for package in description['packages']:
packages.append(package['name'])
if package['name'] == name:
self.is_found = True
not_lib = True
if 'targetType' in package:
if package['targetType'] == 'library':
not_lib = False
if not_lib:
mlog.error(mlog.bold(name), "found but it isn't a library")
self.is_found = False
return
self.module_path = self._find_right_lib_path(package['path'], comp, description, True, package['targetFileName'])
if not os.path.exists(self.module_path):
# check if the dependency was built for other archs
archs = [['x86_64'], ['x86'], ['x86', 'x86_mscoff']]
for a in archs:
description_a = copy.deepcopy(description)
description_a['architecture'] = a
arch_module_path = self._find_right_lib_path(package['path'], comp, description_a, True, package['targetFileName'])
if arch_module_path:
mlog.error(mlog.bold(name), "found but it wasn't compiled for", mlog.bold(arch))
self.is_found = False
return
mlog.error(mlog.bold(name), "found but it wasn't compiled with", mlog.bold(comp))
self.is_found = False
return
self.version = package['version']
self.pkg = package
if self.pkg['targetFileName'].endswith('.a'):
self.static = True
self.compile_args = []
for flag in self.pkg['dflags']:
            self.compile_args.append(flag)
for path in self.pkg['importPaths']:
self.compile_args.append('-I' + os.path.join(self.pkg['path'], path))
self.link_args = self.raw_link_args = []
for flag in self.pkg['lflags']:
self.link_args.append(flag)
self.link_args.append(os.path.join(self.module_path, self.pkg['targetFileName']))
# Handle dependencies
libs = []
def add_lib_args(field_name, target):
if field_name in target['buildSettings']:
for lib in target['buildSettings'][field_name]:
if lib not in libs:
libs.append(lib)
                        if os.name != 'nt':
pkgdep = PkgConfigDependency(lib, environment, {'required': 'true', 'silent': 'true'})
for arg in pkgdep.get_compile_args():
self.compile_args.append(arg)
for arg in pkgdep.get_link_args():
self.link_args.append(arg)
for arg in pkgdep.get_link_args(raw=True):
self.raw_link_args.append(arg)
for target in description['targets']:
if target['rootPackage'] in packages:
add_lib_args('libs', target)
add_lib_args('libs-{}'.format(platform.machine()), target)
for file in target['buildSettings']['linkerFiles']:
lib_path = self._find_right_lib_path(file, comp, description)
if lib_path:
self.link_args.append(lib_path)
else:
self.is_found = False
def get_compiler(self):
return self.compiler
def _find_right_lib_path(self, default_path, comp, description, folder_only=False, file_name=''):
module_path = lib_file_name = ''
if folder_only:
module_path = default_path
lib_file_name = file_name
else:
module_path = os.path.dirname(default_path)
lib_file_name = os.path.basename(default_path)
module_build_path = os.path.join(module_path, '.dub', 'build')
# Get D version implemented in the compiler
# gdc doesn't support this
ret, res = self._call_dubbin(['--version'])
if ret != 0:
            mlog.error('Failed to run', mlog.bold(comp))
return
        d_ver = re.search(r'v[0-9]\.[0-9][0-9][0-9]\.[0-9]', res)  # Ex.: v2.081.2
if d_ver is not None:
d_ver = d_ver.group().rsplit('.', 1)[0].replace('v', '').replace('.', '') # Fix structure. Ex.: 2081
else:
d_ver = '' # gdc
if not os.path.isdir(module_build_path):
return ''
# Ex.: library-debug-linux.posix-x86_64-ldc_2081-EF934983A3319F8F8FF2F0E107A363BA
build_name = 'library-{}-{}-{}-{}_{}'.format(description['buildType'], '.'.join(description['platform']), '.'.join(description['architecture']), comp, d_ver)
for entry in os.listdir(module_build_path):
if entry.startswith(build_name):
for file in os.listdir(os.path.join(module_build_path, entry)):
if file == lib_file_name:
if folder_only:
return os.path.join(module_build_path, entry)
else:
return os.path.join(module_build_path, entry, lib_file_name)
return ''
def _call_dubbin(self, args, env=None):
p, out = Popen_safe(self.dubbin.get_command() + args, env=env)[0:2]
return p.returncode, out.strip()
    def _call_compbin(self, args, env=None):
p, out = Popen_safe(self.compiler.get_exelist() + args, env=env)[0:2]
return p.returncode, out.strip()
def _check_dub(self):
dubbin = ExternalProgram('dub', silent=True)
if dubbin.found():
try:
p, out = Popen_safe(dubbin.get_command() + ['--version'])[0:2]
if p.returncode != 0:
mlog.warning('Found dub {!r} but couldn\'t run it'
''.format(' '.join(dubbin.get_command())))
# Set to False instead of None to signify that we've already
# searched for it and not found it
dubbin = False
except (FileNotFoundError, PermissionError):
dubbin = False
else:
dubbin = False
if dubbin:
mlog.log('Found DUB:', mlog.bold(dubbin.get_path()),
'(%s)' % out.strip())
else:
mlog.log('Found DUB:', mlog.red('NO'))
return dubbin
@staticmethod
def get_methods():
return [DependencyMethods.DUB]
class ExternalProgram:
windows_exts = ('exe', 'msc', 'com', 'bat', 'cmd')
def __init__(self, name, command=None, silent=False, search_dir=None):
self.name = name
if command is not None:
self.command = listify(command)
else:
self.command = self._search(name, search_dir)
# Set path to be the last item that is actually a file (in order to
        # skip options in something like ['python', '-u', 'file.py']). If we
# can't find any components, default to the last component of the path.
self.path = self.command[-1]
for i in range(len(self.command) - 1, -1, -1):
arg = self.command[i]
if arg is not None and os.path.isfile(arg):
self.path = arg
break
if not silent:
if self.found():
mlog.log('Program', mlog.bold(name), 'found:', mlog.green('YES'),
'(%s)' % ' '.join(self.command))
else:
mlog.log('Program', mlog.bold(name), 'found:', mlog.red('NO'))
def __repr__(self):
r = '<{} {!r} -> {!r}>'
return r.format(self.__class__.__name__, self.name, self.command)
def description(self):
'''Human friendly description of the command'''
return ' '.join(self.command)
@staticmethod
def from_cross_info(cross_info, name):
if name not in cross_info.config['binaries']:
return NonExistingExternalProgram()
command = cross_info.config['binaries'][name]
if not isinstance(command, (list, str)):
raise MesonException('Invalid type {!r} for binary {!r} in cross file'
''.format(command, name))
if isinstance(command, list):
if len(command) == 1:
command = command[0]
# We cannot do any searching if the command is a list, and we don't
# need to search if the path is an absolute path.
if isinstance(command, list) or os.path.isabs(command):
return ExternalProgram(name, command=command, silent=True)
# Search for the command using the specified string!
return ExternalProgram(command, silent=True)
@staticmethod
def _shebang_to_cmd(script):
"""
Check if the file has a shebang and manually parse it to figure out
the interpreter to use. This is useful if the script is not executable
or if we're on Windows (which does not understand shebangs).
"""
try:
with open(script) as f:
first_line = f.readline().strip()
if first_line.startswith('#!'):
# In a shebang, everything before the first space is assumed to
# be the command to run and everything after the first space is
# the single argument to pass to that command. So we must split
# exactly once.
commands = first_line[2:].split('#')[0].strip().split(maxsplit=1)
if mesonlib.is_windows():
# Windows does not have UNIX paths so remove them,
# but don't remove Windows paths
if commands[0].startswith('/'):
commands[0] = commands[0].split('/')[-1]
if len(commands) > 0 and commands[0] == 'env':
commands = commands[1:]
# Windows does not ship python3.exe, but we know the path to it
if len(commands) > 0 and commands[0] == 'python3':
commands = mesonlib.python_command + commands[1:]
elif mesonlib.is_haiku():
# Haiku does not have /usr, but a lot of scripts assume that
# /usr/bin/env always exists. Detect that case and run the
# script with the interpreter after it.
if commands[0] == '/usr/bin/env':
commands = commands[1:]
# We know what python3 is, we're running on it
if len(commands) > 0 and commands[0] == 'python3':
commands = mesonlib.python_command + commands[1:]
return commands + [script]
except Exception as e:
mlog.debug(e)
pass
mlog.debug('Unusable script {!r}'.format(script))
return False
def _is_executable(self, path):
suffix = os.path.splitext(path)[-1].lower()[1:]
if mesonlib.is_windows():
if suffix in self.windows_exts:
return True
elif os.access(path, os.X_OK):
return not os.path.isdir(path)
return False
def _search_dir(self, name, search_dir):
if search_dir is None:
return False
trial = os.path.join(search_dir, name)
if os.path.exists(trial):
if self._is_executable(trial):
return [trial]
# Now getting desperate. Maybe it is a script file that is
# a) not chmodded executable, or
# b) we are on windows so they can't be directly executed.
return self._shebang_to_cmd(trial)
else:
if mesonlib.is_windows():
for ext in self.windows_exts:
trial_ext = '{}.{}'.format(trial, ext)
if os.path.exists(trial_ext):
return [trial_ext]
return False
def _search_windows_special_cases(self, name, command):
'''
Lots of weird Windows quirks:
1. PATH search for @name returns files with extensions from PATHEXT,
but only self.windows_exts are executable without an interpreter.
2. @name might be an absolute path to an executable, but without the
extension. This works inside MinGW so people use it a lot.
3. The script is specified without an extension, in which case we have
to manually search in PATH.
4. More special-casing for the shebang inside the script.
'''
if command:
# On Windows, even if the PATH search returned a full path, we can't be
# sure that it can be run directly if it's not a native executable.
# For instance, interpreted scripts sometimes need to be run explicitly
# with an interpreter if the file association is not done properly.
name_ext = os.path.splitext(command)[1]
if name_ext[1:].lower() in self.windows_exts:
# Good, it can be directly executed
return [command]
# Try to extract the interpreter from the shebang
commands = self._shebang_to_cmd(command)
if commands:
return commands
return [None]
# Maybe the name is an absolute path to a native Windows
# executable, but without the extension. This is technically wrong,
# but many people do it because it works in the MinGW shell.
if os.path.isabs(name):
for ext in self.windows_exts:
command = '{}.{}'.format(name, ext)
if os.path.exists(command):
return [command]
# On Windows, interpreted scripts must have an extension otherwise they
# cannot be found by a standard PATH search. So we do a custom search
# where we manually search for a script with a shebang in PATH.
search_dirs = os.environ.get('PATH', '').split(';')
for search_dir in search_dirs:
commands = self._search_dir(name, search_dir)
if commands:
return commands
return [None]
def _search(self, name, search_dir):
'''
Search in the specified dir for the specified executable by name
and if not found search in PATH
'''
commands = self._search_dir(name, search_dir)
if commands:
return commands
# Do a standard search in PATH
command = shutil.which(name)
if mesonlib.is_windows():
return self._search_windows_special_cases(name, command)
# On UNIX-like platforms, shutil.which() is enough to find
# all executables whether in PATH or with an absolute path
return [command]
def found(self):
return self.command[0] is not None
def get_command(self):
return self.command[:]
def get_path(self):
return self.path
def get_name(self):
return self.name
class NonExistingExternalProgram(ExternalProgram):
"A program that will never exist"
def __init__(self):
self.name = 'nonexistingprogram'
self.command = [None]
self.path = None
def __repr__(self):
r = '<{} {!r} -> {!r}>'
return r.format(self.__class__.__name__, self.name, self.command)
def found(self):
return False
class EmptyExternalProgram(ExternalProgram):
'''
A program object that returns an empty list of commands. Used for cases
such as a cross file exe_wrapper to represent that it's not required.
'''
def __init__(self):
self.name = None
self.command = []
self.path = None
def __repr__(self):
r = '<{} {!r} -> {!r}>'
return r.format(self.__class__.__name__, self.name, self.command)
def found(self):
return True
class ExternalLibrary(ExternalDependency):
def __init__(self, name, link_args, environment, language, silent=False):
super().__init__('library', environment, language, {})
self.name = name
self.language = language
self.is_found = False
if link_args:
self.is_found = True
self.link_args = link_args
if not silent:
if self.is_found:
mlog.log('Library', mlog.bold(name), 'found:', mlog.green('YES'))
else:
mlog.log('Library', mlog.bold(name), 'found:', mlog.red('NO'))
def get_link_args(self, language=None, **kwargs):
'''
External libraries detected using a compiler must only be used with
compatible code. For instance, Vala libraries (.vapi files) cannot be
used with C code, and not all Rust library types can be linked with
C-like code. Note that C++ libraries *can* be linked with C code with
a C++ linker (and vice-versa).
'''
# Using a vala library in a non-vala target, or a non-vala library in a vala target
# XXX: This should be extended to other non-C linkers such as Rust
if (self.language == 'vala' and language != 'vala') or \
(language == 'vala' and self.language != 'vala'):
return []
return super().get_link_args(**kwargs)
def get_partial_dependency(self, *, compile_args=False, link_args=False,
links=False, includes=False, sources=False):
# External library only has link_args, so ignore the rest of the
# interface.
new = copy.copy(self)
if not link_args:
new.link_args = []
return new
class ExtraFrameworkDependency(ExternalDependency):
def __init__(self, name, required, path, env, lang, kwargs):
super().__init__('extraframeworks', env, lang, kwargs)
self.name = name
self.required = required
self.detect(name, path)
if self.found():
self.compile_args = ['-I' + os.path.join(self.path, self.name, 'Headers')]
self.link_args = ['-F' + self.path, '-framework', self.name.split('.')[0]]
def detect(self, name, path):
lname = name.lower()
if path is None:
paths = ['/System/Library/Frameworks', '/Library/Frameworks']
else:
paths = [path]
for p in paths:
for d in os.listdir(p):
fullpath = os.path.join(p, d)
if lname != d.rsplit('.', 1)[0].lower():
continue
if not stat.S_ISDIR(os.stat(fullpath).st_mode):
continue
self.path = p
self.name = d
self.is_found = True
return
def log_info(self):
return os.path.join(self.path, self.name)
def log_tried(self):
return 'framework'
def get_dep_identifier(name, kwargs, want_cross):
# Need immutable objects since the identifier will be used as a dict key
version_reqs = listify(kwargs.get('version', []))
if isinstance(version_reqs, list):
version_reqs = frozenset(version_reqs)
identifier = (name, version_reqs, want_cross)
for key, value in kwargs.items():
# 'version' is embedded above as the second element for easy access
# 'native' is handled above with `want_cross`
# 'required' is irrelevant for caching; the caller handles it separately
# 'fallback' subprojects cannot be cached -- they must be initialized
if key in ('version', 'native', 'required', 'fallback',):
continue
# All keyword arguments are strings, ints, or lists (or lists of lists)
if isinstance(value, list):
value = frozenset(listify(value))
identifier += (key, value)
return identifier
display_name_map = {
'boost': 'Boost',
'dub': 'DUB',
'gmock': 'GMock',
'gtest': 'GTest',
'llvm': 'LLVM',
'mpi': 'MPI',
'openmp': 'OpenMP',
'wxwidgets': 'WxWidgets',
}
def find_external_dependency(name, env, kwargs):
assert(name)
required = kwargs.get('required', True)
if not isinstance(required, bool):
raise DependencyException('Keyword "required" must be a boolean.')
if not isinstance(kwargs.get('method', ''), str):
raise DependencyException('Keyword "method" must be a string.')
lname = name.lower()
if lname not in _packages_accept_language and 'language' in kwargs:
raise DependencyException('%s dependency does not accept "language" keyword argument' % (name, ))
if not isinstance(kwargs.get('version', ''), (str, list)):
raise DependencyException('Keyword "Version" must be string or list.')
# display the dependency name with correct casing
display_name = display_name_map.get(lname, lname)
# if this isn't a cross-build, it's uninteresting if native: is used or not
if not env.is_cross_build():
type_text = 'Dependency'
else:
type_text = 'Native' if kwargs.get('native', False) else 'Cross'
type_text += ' dependency'
# build a list of dependency methods to try
candidates = _build_external_dependency_list(name, env, kwargs)
pkg_exc = None
pkgdep = []
details = ''
for c in candidates:
# try this dependency method
try:
d = c()
d._check_version()
pkgdep.append(d)
except Exception as e:
mlog.debug(str(e))
# store the first exception we see
if not pkg_exc:
pkg_exc = e
else:
details = d.log_details()
if details:
details = '(' + details + ') '
if 'language' in kwargs:
details += 'for ' + d.language + ' '
# if the dependency was found
if d.found():
info = []
if d.version:
info.append(d.version)
log_info = d.log_info()
if log_info:
info.append('(' + log_info + ')')
info = ' '.join(info)
mlog.log(type_text, mlog.bold(display_name), details + 'found:', mlog.green('YES'), info)
return d
# otherwise, the dependency could not be found
tried_methods = [d.log_tried() for d in pkgdep if d.log_tried()]
if tried_methods:
tried = '{}'.format(mlog.format_list(tried_methods))
else:
tried = ''
mlog.log(type_text, mlog.bold(display_name), details + 'found:', mlog.red('NO'),
'(tried {})'.format(tried) if tried else '')
if required:
# if exception(s) occurred, re-raise the first one (on the grounds that
# it came from a preferred dependency detection method)
if pkg_exc:
raise pkg_exc
# we have a list of failed ExternalDependency objects, so we can report
# the methods we tried to find the dependency
raise DependencyException('Dependency "%s" not found, tried %s' % (name, tried))
# return the last failed dependency object
if pkgdep:
return pkgdep[-1]
# this should never happen
raise DependencyException('Dependency "%s" not found, but no dependency object to return' % (name))
def _build_external_dependency_list(name, env, kwargs):
# Is there a specific dependency detector for this dependency?
lname = name.lower()
if lname in packages:
# Create the list of dependency object constructors using a factory
# class method, if one exists, otherwise the list just consists of the
# constructor
if getattr(packages[lname], '_factory', None):
dep = packages[lname]._factory(env, kwargs)
else:
dep = [functools.partial(packages[lname], env, kwargs)]
return dep
candidates = []
# If it's explicitly requested, use the dub detection method (only)
if 'dub' == kwargs.get('method', ''):
candidates.append(functools.partial(DubDependency, name, env, kwargs))
return candidates
# TBD: other values of method should control what method(s) are used
# Otherwise, just use the pkgconfig dependency detector
candidates.append(functools.partial(PkgConfigDependency, name, env, kwargs))
# On OSX, also try framework dependency detector
if mesonlib.is_osx():
candidates.append(functools.partial(ExtraFrameworkDependency, name,
False, None, env, None, kwargs))
return candidates
def strip_system_libdirs(environment, link_args):
"""Remove -L<system path> arguments.
    Leaving these in will break builds where a user has a version of a library
    in the system path and a different version elsewhere, and wants to link
    against the non-system version.
"""
exclude = {'-L{}'.format(p) for p in environment.get_compiler_system_dirs()}
return [l for l in link_args if l not in exclude]
```
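The `factory`/`_unpickle` pair in `ConfigToolDependency` above builds a subclass at runtime with `type()` and attaches a `__reduce__` so that instances of the generated class survive pickling. Below is a minimal, standalone sketch of that pattern; the `Base` class and the pcap-config strings are illustrative placeholders, not meson code.

```python
# Standalone sketch of the type()/__reduce__ pattern used by ConfigToolDependency.factory.
# Base and the 'pcap-config' values are made-up placeholders, not meson's classes.
import pickle


class Base:
    tools = None
    tool_name = None

    def __init__(self, name):
        self.name = name

    @classmethod
    def _unpickle(cls):
        # Recreate a bare instance; pickle then restores the saved __dict__ state.
        return cls.__new__(cls)

    @classmethod
    def factory(cls, name, tools, tool_name):
        def reduce_(self):
            # (callable, args, state): unpickling calls cls._unpickle() and applies state.
            return (cls._unpickle, (), self.__dict__)
        # Dynamically generated subclass with the per-tool attributes baked in.
        sub = type('{}Dependency'.format(name.capitalize()), (cls,),
                   {'tools': tools, 'tool_name': tool_name, '__reduce__': reduce_})
        return sub(name)


dep = Base.factory('pcap', ['pcap-config'], 'pcap-config')
assert dep.tool_name == 'pcap-config'       # attribute lives on the generated subclass
restored = pickle.loads(pickle.dumps(dep))  # round-trips despite the dynamic class
assert restored.name == 'pcap'
```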
{
"source": "jmgao/skybot",
"score": 3
}
#### File: skybot/plugins/twitter.py
```python
from __future__ import unicode_literals
import random
import re
from time import strptime, strftime
from urllib.parse import quote
from util import hook, http
@hook.api_key("twitter")
@hook.command
def twitter(inp, api_key=None):
".twitter <user>/<user> <n>/<id>/#<search>/#<search> <n> -- " "get <user>'s last/<n>th tweet/get tweet <id>/do <search>/get <n>th <search> result"
if not isinstance(api_key, dict) or any(
key not in api_key
for key in ("consumer", "consumer_secret", "access", "access_secret")
):
return "error: api keys not set"
getting_id = False
doing_search = False
index_specified = False
if re.match(r"^\d+$", inp):
getting_id = True
request_url = "https://api.twitter.com/1.1/statuses/show.json?id=%s" % inp
else:
try:
            inp, index = re.split(r"\s+", inp, 1)
index = int(index)
index_specified = True
except ValueError:
index = 0
if index < 0:
index = 0
if index >= 20:
return "error: only supports up to the 20th tweet"
if re.match(r"^#", inp):
doing_search = True
request_url = "https://api.twitter.com/1.1/search/tweets.json?q=%s" % quote(
inp
)
else:
request_url = (
"https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=%s"
% inp
)
try:
tweet = http.get_json(
request_url, oauth=True, oauth_keys=api_key, tweet_mode="extended"
)
except http.HTTPError as e:
errors = {
400: "bad request (ratelimited?)",
401: "unauthorized",
403: "forbidden",
404: "invalid user/id",
500: "twitter is broken",
502: 'twitter is down ("getting upgraded")',
503: "twitter is overloaded (lol, RoR)",
410: "twitter shut off api v1.",
}
if e.code == 404:
return "error: invalid " + ["username", "tweet id"][getting_id]
if e.code in errors:
return "error: " + errors[e.code]
return "error: unknown %s" % e.code
if doing_search:
try:
tweet = tweet["statuses"]
if not index_specified:
index = random.randint(0, len(tweet) - 1)
except KeyError:
return "error: no results"
if not getting_id:
try:
tweet = tweet[index]
except IndexError:
return "error: not that many tweets found"
if "retweeted_status" in tweet:
rt = tweet["retweeted_status"]
rt_text = http.unescape(rt["full_text"]).replace("\n", " ")
text = "RT @%s %s" % (rt["user"]["screen_name"], rt_text)
else:
text = http.unescape(tweet["full_text"]).replace("\n", " ")
for url in tweet.get('entities', {}).get('urls', []):
new_text = text.replace(url['url'], url['expanded_url'])
if len(new_text) < 350:
text = new_text
for url in tweet.get('extended_entities', tweet.get('entities', {})).get('media', []):
if url['type'] in ('video', 'animated_gif'):
try:
media_url = max(url['video_info']['variants'], key=lambda x: x.get('bitrate', 0))['url']
except KeyError:
continue
else:
media_url = url['media_url_https']
if url['url'] in text:
new_text = text.replace(url['url'], media_url)
else:
new_text = text + ' ' + media_url
if len(new_text) < 400:
text = new_text
screen_name = tweet["user"]["screen_name"]
time = tweet["created_at"]
time = strftime("%Y-%m-%d %H:%M:%S", strptime(time, "%a %b %d %H:%M:%S +0000 %Y"))
return "%s \x02%s\x02: %s" % (time, screen_name, text)
@hook.api_key("twitter")
@hook.regex(r"https?://(mobile\.)?twitter.com/(#!/)?([_0-9a-zA-Z]+)/status/(?P<id>\d+)")
def show_tweet(match, api_key=None):
return twitter(match.group("id"), api_key)
```
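The entity-handling pass near the end of `twitter()` above swaps each shortened URL for its expanded form, keeping a replacement only while the message stays under a length cap. A minimal, standalone sketch of that step, using a fabricated tweet payload rather than a live API response:

```python
# Sketch of the URL-expansion step above; the tweet dict is fabricated sample data
# shaped like the relevant slice of a Twitter API response, not a real API call.
def expand_urls(text, urls, limit=350):
    for url in urls:
        new_text = text.replace(url['url'], url['expanded_url'])
        if len(new_text) < limit:  # keep the expansion only if the result still fits
            text = new_text
    return text


tweet = {
    'full_text': 'release notes: https://t.co/abc123',
    'entities': {'urls': [{'url': 'https://t.co/abc123',
                           'expanded_url': 'https://example.com/notes'}]},
}
print(expand_urls(tweet['full_text'], tweet['entities']['urls']))
# -> release notes: https://example.com/notes
```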
{
"source": "jmgc/pyston",
"score": 3
}
#### File: pyston/microbenchmarks/sort2_20.py
```python
def f(l):
if l[0] > l[1]: l[0], l[1] = l[1], l[0]
if l[0] > l[2]: l[0], l[2] = l[2], l[0]
if l[0] > l[3]: l[0], l[3] = l[3], l[0]
if l[0] > l[4]: l[0], l[4] = l[4], l[0]
if l[0] > l[5]: l[0], l[5] = l[5], l[0]
if l[0] > l[6]: l[0], l[6] = l[6], l[0]
if l[0] > l[7]: l[0], l[7] = l[7], l[0]
if l[0] > l[8]: l[0], l[8] = l[8], l[0]
if l[0] > l[9]: l[0], l[9] = l[9], l[0]
if l[0] > l[10]: l[0], l[10] = l[10], l[0]
if l[0] > l[11]: l[0], l[11] = l[11], l[0]
if l[0] > l[12]: l[0], l[12] = l[12], l[0]
if l[0] > l[13]: l[0], l[13] = l[13], l[0]
if l[0] > l[14]: l[0], l[14] = l[14], l[0]
if l[0] > l[15]: l[0], l[15] = l[15], l[0]
if l[0] > l[16]: l[0], l[16] = l[16], l[0]
if l[0] > l[17]: l[0], l[17] = l[17], l[0]
if l[0] > l[18]: l[0], l[18] = l[18], l[0]
if l[0] > l[19]: l[0], l[19] = l[19], l[0]
if l[1] > l[2]: l[1], l[2] = l[2], l[1]
if l[1] > l[3]: l[1], l[3] = l[3], l[1]
if l[1] > l[4]: l[1], l[4] = l[4], l[1]
if l[1] > l[5]: l[1], l[5] = l[5], l[1]
if l[1] > l[6]: l[1], l[6] = l[6], l[1]
if l[1] > l[7]: l[1], l[7] = l[7], l[1]
if l[1] > l[8]: l[1], l[8] = l[8], l[1]
if l[1] > l[9]: l[1], l[9] = l[9], l[1]
if l[1] > l[10]: l[1], l[10] = l[10], l[1]
if l[1] > l[11]: l[1], l[11] = l[11], l[1]
if l[1] > l[12]: l[1], l[12] = l[12], l[1]
if l[1] > l[13]: l[1], l[13] = l[13], l[1]
if l[1] > l[14]: l[1], l[14] = l[14], l[1]
if l[1] > l[15]: l[1], l[15] = l[15], l[1]
if l[1] > l[16]: l[1], l[16] = l[16], l[1]
if l[1] > l[17]: l[1], l[17] = l[17], l[1]
if l[1] > l[18]: l[1], l[18] = l[18], l[1]
if l[1] > l[19]: l[1], l[19] = l[19], l[1]
if l[2] > l[3]: l[2], l[3] = l[3], l[2]
if l[2] > l[4]: l[2], l[4] = l[4], l[2]
if l[2] > l[5]: l[2], l[5] = l[5], l[2]
if l[2] > l[6]: l[2], l[6] = l[6], l[2]
if l[2] > l[7]: l[2], l[7] = l[7], l[2]
if l[2] > l[8]: l[2], l[8] = l[8], l[2]
if l[2] > l[9]: l[2], l[9] = l[9], l[2]
if l[2] > l[10]: l[2], l[10] = l[10], l[2]
if l[2] > l[11]: l[2], l[11] = l[11], l[2]
if l[2] > l[12]: l[2], l[12] = l[12], l[2]
if l[2] > l[13]: l[2], l[13] = l[13], l[2]
if l[2] > l[14]: l[2], l[14] = l[14], l[2]
if l[2] > l[15]: l[2], l[15] = l[15], l[2]
if l[2] > l[16]: l[2], l[16] = l[16], l[2]
if l[2] > l[17]: l[2], l[17] = l[17], l[2]
if l[2] > l[18]: l[2], l[18] = l[18], l[2]
if l[2] > l[19]: l[2], l[19] = l[19], l[2]
if l[3] > l[4]: l[3], l[4] = l[4], l[3]
if l[3] > l[5]: l[3], l[5] = l[5], l[3]
if l[3] > l[6]: l[3], l[6] = l[6], l[3]
if l[3] > l[7]: l[3], l[7] = l[7], l[3]
if l[3] > l[8]: l[3], l[8] = l[8], l[3]
if l[3] > l[9]: l[3], l[9] = l[9], l[3]
if l[3] > l[10]: l[3], l[10] = l[10], l[3]
if l[3] > l[11]: l[3], l[11] = l[11], l[3]
if l[3] > l[12]: l[3], l[12] = l[12], l[3]
if l[3] > l[13]: l[3], l[13] = l[13], l[3]
if l[3] > l[14]: l[3], l[14] = l[14], l[3]
if l[3] > l[15]: l[3], l[15] = l[15], l[3]
if l[3] > l[16]: l[3], l[16] = l[16], l[3]
if l[3] > l[17]: l[3], l[17] = l[17], l[3]
if l[3] > l[18]: l[3], l[18] = l[18], l[3]
if l[3] > l[19]: l[3], l[19] = l[19], l[3]
if l[4] > l[5]: l[4], l[5] = l[5], l[4]
if l[4] > l[6]: l[4], l[6] = l[6], l[4]
if l[4] > l[7]: l[4], l[7] = l[7], l[4]
if l[4] > l[8]: l[4], l[8] = l[8], l[4]
if l[4] > l[9]: l[4], l[9] = l[9], l[4]
if l[4] > l[10]: l[4], l[10] = l[10], l[4]
if l[4] > l[11]: l[4], l[11] = l[11], l[4]
if l[4] > l[12]: l[4], l[12] = l[12], l[4]
if l[4] > l[13]: l[4], l[13] = l[13], l[4]
if l[4] > l[14]: l[4], l[14] = l[14], l[4]
if l[4] > l[15]: l[4], l[15] = l[15], l[4]
if l[4] > l[16]: l[4], l[16] = l[16], l[4]
if l[4] > l[17]: l[4], l[17] = l[17], l[4]
if l[4] > l[18]: l[4], l[18] = l[18], l[4]
if l[4] > l[19]: l[4], l[19] = l[19], l[4]
if l[5] > l[6]: l[5], l[6] = l[6], l[5]
if l[5] > l[7]: l[5], l[7] = l[7], l[5]
if l[5] > l[8]: l[5], l[8] = l[8], l[5]
if l[5] > l[9]: l[5], l[9] = l[9], l[5]
if l[5] > l[10]: l[5], l[10] = l[10], l[5]
if l[5] > l[11]: l[5], l[11] = l[11], l[5]
if l[5] > l[12]: l[5], l[12] = l[12], l[5]
if l[5] > l[13]: l[5], l[13] = l[13], l[5]
if l[5] > l[14]: l[5], l[14] = l[14], l[5]
if l[5] > l[15]: l[5], l[15] = l[15], l[5]
if l[5] > l[16]: l[5], l[16] = l[16], l[5]
if l[5] > l[17]: l[5], l[17] = l[17], l[5]
if l[5] > l[18]: l[5], l[18] = l[18], l[5]
if l[5] > l[19]: l[5], l[19] = l[19], l[5]
if l[6] > l[7]: l[6], l[7] = l[7], l[6]
if l[6] > l[8]: l[6], l[8] = l[8], l[6]
if l[6] > l[9]: l[6], l[9] = l[9], l[6]
if l[6] > l[10]: l[6], l[10] = l[10], l[6]
if l[6] > l[11]: l[6], l[11] = l[11], l[6]
if l[6] > l[12]: l[6], l[12] = l[12], l[6]
if l[6] > l[13]: l[6], l[13] = l[13], l[6]
if l[6] > l[14]: l[6], l[14] = l[14], l[6]
if l[6] > l[15]: l[6], l[15] = l[15], l[6]
if l[6] > l[16]: l[6], l[16] = l[16], l[6]
if l[6] > l[17]: l[6], l[17] = l[17], l[6]
if l[6] > l[18]: l[6], l[18] = l[18], l[6]
if l[6] > l[19]: l[6], l[19] = l[19], l[6]
if l[7] > l[8]: l[7], l[8] = l[8], l[7]
if l[7] > l[9]: l[7], l[9] = l[9], l[7]
if l[7] > l[10]: l[7], l[10] = l[10], l[7]
if l[7] > l[11]: l[7], l[11] = l[11], l[7]
if l[7] > l[12]: l[7], l[12] = l[12], l[7]
if l[7] > l[13]: l[7], l[13] = l[13], l[7]
if l[7] > l[14]: l[7], l[14] = l[14], l[7]
if l[7] > l[15]: l[7], l[15] = l[15], l[7]
if l[7] > l[16]: l[7], l[16] = l[16], l[7]
if l[7] > l[17]: l[7], l[17] = l[17], l[7]
if l[7] > l[18]: l[7], l[18] = l[18], l[7]
if l[7] > l[19]: l[7], l[19] = l[19], l[7]
if l[8] > l[9]: l[8], l[9] = l[9], l[8]
if l[8] > l[10]: l[8], l[10] = l[10], l[8]
if l[8] > l[11]: l[8], l[11] = l[11], l[8]
if l[8] > l[12]: l[8], l[12] = l[12], l[8]
if l[8] > l[13]: l[8], l[13] = l[13], l[8]
if l[8] > l[14]: l[8], l[14] = l[14], l[8]
if l[8] > l[15]: l[8], l[15] = l[15], l[8]
if l[8] > l[16]: l[8], l[16] = l[16], l[8]
if l[8] > l[17]: l[8], l[17] = l[17], l[8]
if l[8] > l[18]: l[8], l[18] = l[18], l[8]
if l[8] > l[19]: l[8], l[19] = l[19], l[8]
if l[9] > l[10]: l[9], l[10] = l[10], l[9]
if l[9] > l[11]: l[9], l[11] = l[11], l[9]
if l[9] > l[12]: l[9], l[12] = l[12], l[9]
if l[9] > l[13]: l[9], l[13] = l[13], l[9]
if l[9] > l[14]: l[9], l[14] = l[14], l[9]
if l[9] > l[15]: l[9], l[15] = l[15], l[9]
if l[9] > l[16]: l[9], l[16] = l[16], l[9]
if l[9] > l[17]: l[9], l[17] = l[17], l[9]
if l[9] > l[18]: l[9], l[18] = l[18], l[9]
if l[9] > l[19]: l[9], l[19] = l[19], l[9]
if l[10] > l[11]: l[10], l[11] = l[11], l[10]
if l[10] > l[12]: l[10], l[12] = l[12], l[10]
if l[10] > l[13]: l[10], l[13] = l[13], l[10]
if l[10] > l[14]: l[10], l[14] = l[14], l[10]
if l[10] > l[15]: l[10], l[15] = l[15], l[10]
if l[10] > l[16]: l[10], l[16] = l[16], l[10]
if l[10] > l[17]: l[10], l[17] = l[17], l[10]
if l[10] > l[18]: l[10], l[18] = l[18], l[10]
if l[10] > l[19]: l[10], l[19] = l[19], l[10]
if l[11] > l[12]: l[11], l[12] = l[12], l[11]
if l[11] > l[13]: l[11], l[13] = l[13], l[11]
if l[11] > l[14]: l[11], l[14] = l[14], l[11]
if l[11] > l[15]: l[11], l[15] = l[15], l[11]
if l[11] > l[16]: l[11], l[16] = l[16], l[11]
if l[11] > l[17]: l[11], l[17] = l[17], l[11]
if l[11] > l[18]: l[11], l[18] = l[18], l[11]
if l[11] > l[19]: l[11], l[19] = l[19], l[11]
if l[12] > l[13]: l[12], l[13] = l[13], l[12]
if l[12] > l[14]: l[12], l[14] = l[14], l[12]
if l[12] > l[15]: l[12], l[15] = l[15], l[12]
if l[12] > l[16]: l[12], l[16] = l[16], l[12]
if l[12] > l[17]: l[12], l[17] = l[17], l[12]
if l[12] > l[18]: l[12], l[18] = l[18], l[12]
if l[12] > l[19]: l[12], l[19] = l[19], l[12]
if l[13] > l[14]: l[13], l[14] = l[14], l[13]
if l[13] > l[15]: l[13], l[15] = l[15], l[13]
if l[13] > l[16]: l[13], l[16] = l[16], l[13]
if l[13] > l[17]: l[13], l[17] = l[17], l[13]
if l[13] > l[18]: l[13], l[18] = l[18], l[13]
if l[13] > l[19]: l[13], l[19] = l[19], l[13]
if l[14] > l[15]: l[14], l[15] = l[15], l[14]
if l[14] > l[16]: l[14], l[16] = l[16], l[14]
if l[14] > l[17]: l[14], l[17] = l[17], l[14]
if l[14] > l[18]: l[14], l[18] = l[18], l[14]
if l[14] > l[19]: l[14], l[19] = l[19], l[14]
if l[15] > l[16]: l[15], l[16] = l[16], l[15]
if l[15] > l[17]: l[15], l[17] = l[17], l[15]
if l[15] > l[18]: l[15], l[18] = l[18], l[15]
if l[15] > l[19]: l[15], l[19] = l[19], l[15]
if l[16] > l[17]: l[16], l[17] = l[17], l[16]
if l[16] > l[18]: l[16], l[18] = l[18], l[16]
if l[16] > l[19]: l[16], l[19] = l[19], l[16]
if l[17] > l[18]: l[17], l[18] = l[18], l[17]
if l[17] > l[19]: l[17], l[19] = l[19], l[17]
if l[18] > l[19]: l[18], l[19] = l[19], l[18]
return l
print f([3, 12, 16, 8, 17, 6, 13, 0, 4, 15, 1, 14, 11, 18, 10, 5, 9, 7, 2, 19])
print f([2, 6, 11, 4, 7, 18, 19, 10, 15, 13, 3, 0, 17, 5, 8, 1, 14, 9, 16, 12])
print f([6, 12, 10, 7, 19, 15, 14, 5, 16, 1, 4, 11, 13, 2, 18, 9, 0, 3, 17, 8])
print f([1, 17, 13, 8, 9, 19, 18, 6, 5, 10, 12, 14, 2, 15, 0, 4, 11, 16, 7, 3])
print f([4, 7, 8, 6, 16, 10, 0, 5, 1, 3, 19, 2, 15, 12, 17, 11, 13, 18, 14, 9])
print f([14, 16, 11, 12, 5, 0, 10, 3, 1, 8, 17, 13, 4, 19, 9, 15, 6, 2, 7, 18])
print f([0, 3, 14, 9, 19, 13, 1, 7, 4, 17, 8, 16, 10, 5, 12, 6, 15, 11, 2, 18])
print f([17, 19, 3, 13, 15, 6, 16, 4, 0, 18, 8, 1, 9, 11, 2, 12, 7, 10, 5, 14])
print f([19, 5, 15, 1, 8, 2, 3, 12, 6, 14, 17, 7, 13, 10, 4, 18, 11, 9, 16, 0])
print f([13, 10, 11, 17, 19, 12, 14, 7, 5, 9, 2, 4, 18, 8, 6, 3, 16, 15, 0, 1])
```
#### File: pyston/minibenchmarks/raytrace.py
```python
import math
EPSILON = 0.00001
INF = 1.0e9
class Vector(object):
def __init__(self, initx, inity, initz):
self.x = initx
self.y = inity
self.z = initz
def __str__(self):
return '(%s,%s,%s)' % (self.x, self.y, self.z)
def __repr__(self):
return 'Vector(%s,%s,%s)' % (self.x, self.y, self.z)
def magnitude(self):
return math.sqrt(self.dot(self))
def __add__(self, other):
return Vector(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other):
return Vector(self.x - other.x, self.y - other.y, self.z - other.z)
def scale(self, factor):
return Vector(factor * self.x, factor * self.y, factor * self.z)
def dot(self, other):
return (self.x * other.x) + (self.y * other.y) + (self.z * other.z)
def cross(self, other):
return Vector(self.y * other.z - self.z * other.y,
self.z * other.x - self.x * other.z,
self.x * other.y - self.y * other.x)
def normalized(self):
return self.scale(1.0 / self.magnitude())
def negated(self):
return self.scale(-1)
def __eq__(self, other):
return (self.x == other.x) and (self.y == other.y) and (self.z == other.z)
def isVector(self):
return True
def isPoint(self):
return False
def reflectThrough(self, normal):
d = normal.scale(self.dot(normal))
return self - d.scale(2)
VZERO = Vector(0,0,0)
VRIGHT = Vector(1,0,0)
VUP = Vector(0,1,0)
VOUT = Vector(0,0,1)
if not (VRIGHT.reflectThrough(VUP) == VRIGHT):
print(1/0)
if not (Vector(-1,-1,0).reflectThrough(VUP) == Vector(-1,1,0)):
print(1/0)
class Point(object):
def __init__(self, initx, inity, initz):
self.x = initx
self.y = inity
self.z = initz
def __str__(self):
return '(%s,%s,%s)' % (self.x, self.y, self.z)
def __repr__(self):
return 'Point(%s,%s,%s)' % (self.x, self.y, self.z)
def __add__(self, other):
return Point(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other):
return Vector(self.x - other.x, self.y - other.y, self.z - other.z)
def isVector(self):
return False
def isPoint(self):
return True
class Sphere(object):
def __init__(self, centre, radius):
self.centre = centre
self.radius = radius
def __repr__(self):
return 'Sphere(%s,%s)' % (repr(self.centre), self.radius)
def intersectionTime(self, ray):
cp = self.centre - ray.point
v = cp.dot(ray.vector)
discriminant = (self.radius * self.radius) - (cp.dot(cp) - v*v)
if discriminant < 0:
return INF + 1
else:
return v - math.sqrt(discriminant)
def normalAt(self, p):
return (p - self.centre).normalized()
class Halfspace(object):
def __init__(self, point, normal):
self.point = point
self.normal = normal.normalized()
def __repr__(self):
return 'Halfspace(%s,%s)' % (repr(self.point), repr(self.normal))
def intersectionTime(self, ray):
v = ray.vector.dot(self.normal)
if v:
return 1 / -v
else:
return INF + 1
def normalAt(self, p):
return self.normal
class Ray(object):
def __init__(self, point, vector):
self.point = point
self.vector = vector.normalized()
def __repr__(self):
return 'Ray(%s,%s)' % (repr(self.point), repr(self.vector))
def pointAtTime(self, t):
return self.point + self.vector.scale(t)
PZERO = Point(0,0,0)
a = Vector(3,4,12)
b = Vector(1,1,1)
class PpmCanvas(object):
def __init__(self, width, height, filenameBase):
self.bytes = [0] * (width * height * 3)
for i in range(width * height):
self.bytes[i * 3 + 2] = 255
self.width = width
self.height = height
self.filenameBase = filenameBase
def plot(self, x, y, r, g, b):
i = ((self.height - y - 1) * self.width + x) * 3
self.bytes[i ] = max(0, min(255, int(r * 255)))
self.bytes[i+1] = max(0, min(255, int(g * 255)))
self.bytes[i+2] = max(0, min(255, int(b * 255)))
def save(self):
with open(self.filenameBase + '.ppm', 'wb') as f:
f.write('P6 %d %d 255\n' % (self.width, self.height))
l = []
for c in self.bytes:
l.append(chr(c))
f.write(''.join(l))
def firstIntersection(intersections):
result = intersections[0][0], INF+1, intersections[0][2]
for i in intersections:
candidateT = i[1]
if candidateT < INF and candidateT > -EPSILON:
if result[1] > INF or candidateT < result[1]:
result = i
return result
class Scene(object):
def __init__(self):
self.objects = []
self.lightPoints = []
self.position = Point(0, 1.8, 10)
self.lookingAt = PZERO
self.fieldOfView = 45
self.recursionDepth = 0
def lookAt(self, p):
self.lookingAt = p
def addObject(self, on, oi, sc):
self.objects.append((on, oi, sc))
def addLight(self, p):
self.lightPoints.append(p)
def render(self, canvas):
#print 'Computing field of view'
fovRadians = math.pi * (self.fieldOfView / 2.0) / 180.0
halfWidth = math.tan(fovRadians)
halfHeight = 0.75 * halfWidth
width = halfWidth * 2
height = halfHeight * 2
pixelWidth = width / (canvas.width - 1)
pixelHeight = height / (canvas.height - 1)
eye = Ray(self.position, self.lookingAt - self.position)
vpRight = eye.vector.cross(VUP).normalized()
vpUp = vpRight.cross(eye.vector).normalized()
#print 'Looping over pixels'
previousfraction = 0.0
for y in range(canvas.height):
currentfraction = 1.0 * y / canvas.height
if currentfraction - previousfraction > 0.05:
print('%d%% complete' % int(currentfraction * 100))
previousfraction = currentfraction
for x in range(canvas.width):
xcomp = vpRight.scale(x * pixelWidth - halfWidth)
ycomp = vpUp.scale(y * pixelHeight - halfHeight)
ray = Ray(eye.point, eye.vector + xcomp + ycomp)
colour = self.rayColour(ray)
canvas.plot(x,y,colour[0], colour[1], colour[2])
print('Complete.')
# canvas.save()
def rayColour(self, ray):
if self.recursionDepth > 3:
return (0.0,0.0,0.0)
self.recursionDepth = self.recursionDepth + 1
intersections = []
for on, oi, sc in self.objects:
intersections.append((on, oi(ray), sc))
# intersections = [(on, oi(ray), sc) for (on, oi, sc) in self.objects]
i = firstIntersection(intersections)
if i[1] > INF:
self.recursionDepth = self.recursionDepth - 1
return (0.0,0.0,0.0) ## the background colour
else:
(o, t, s) = i
p = ray.pointAtTime(t)
r = s(self, ray, p, o(p))
self.recursionDepth = self.recursionDepth - 1
return r
def _lightIsVisible(self, l, p):
for (on, oi, sc) in self.objects:
t = oi(Ray(p,l - p))
if t < INF and t > EPSILON:
return False
return True
def visibleLights(self, p):
result = []
for l in self.lightPoints:
if self._lightIsVisible(l, p):
result.append(l)
return result
def addColours(a, scale, b):
return (a[0] + scale * b[0],
a[1] + scale * b[1],
a[2] + scale * b[2])
class SimpleSurface(object):
def __init__(self, baseColour):
self.baseColour = baseColour
self.specularCoefficient = 0.2
self.lambertCoefficient = 0.6
self.ambientCoefficient = 1.0 - self.specularCoefficient - self.lambertCoefficient
def baseColourAt(self, p):
return self.baseColour
def colourAt(self, scene, ray, p, normal):
b = self.baseColourAt(p)
c = (0.0, 0.0, 0.0)
if self.specularCoefficient > 0:
reflectedRay = Ray(p, ray.vector.reflectThrough(normal))
#print p, normal, ray.vector, reflectedRay.vector
reflectedColour = scene.rayColour(reflectedRay)
c = addColours(c, self.specularCoefficient, reflectedColour)
if self.lambertCoefficient > 0:
lambertAmount = 0.0
for lightPoint in scene.visibleLights(p):
contribution = (lightPoint - p).normalized().dot(normal)
if contribution > 0:
lambertAmount = lambertAmount + contribution
lambertAmount = min(1,lambertAmount)
c = addColours(c, self.lambertCoefficient * lambertAmount, b)
if self.ambientCoefficient > 0:
c = addColours(c, self.ambientCoefficient, b)
return c
class CheckerboardSurface(object):
def __init__(self):
self.baseColour = (1.0, 1.0, 1.0)
self.specularCoefficient = 0.2
self.lambertCoefficient = 0.6
self.ambientCoefficient = 1.0 - self.specularCoefficient - self.lambertCoefficient
self.otherColour = (0.0, 0.0, 0.0)
self.checkSize = 1
def baseColourAt(self, p):
v = p - PZERO
v.scale(1.0 / self.checkSize)
if (int(abs(v.x) + 0.5) + \
int(abs(v.y) + 0.5) + \
int(abs(v.z) + 0.5)) \
% 2:
return self.otherColour
else:
return self.baseColour
def colourAt(self, scene, ray, p, normal):
b = self.baseColourAt(p)
c = (0.0,0.0,0.0)
if self.specularCoefficient > 0:
reflectedRay = Ray(p, ray.vector.reflectThrough(normal))
#print p, normal, ray.vector, reflectedRay.vector
reflectedColour = scene.rayColour(reflectedRay)
c = addColours(c, self.specularCoefficient, reflectedColour)
if self.lambertCoefficient > 0:
lambertAmount = 0.0
for lightPoint in scene.visibleLights(p):
contribution = (lightPoint - p).normalized().dot(normal)
if contribution > 0:
lambertAmount = lambertAmount + contribution
lambertAmount = min(1,lambertAmount)
c = addColours(c, self.lambertCoefficient * lambertAmount, b)
if self.ambientCoefficient > 0:
c = addColours(c, self.ambientCoefficient, b)
return c
def _main():
Canvas = PpmCanvas
# c = Canvas(4,2,'test_raytrace_tiny')
# c = Canvas(80,60,'test_raytrace_small')
# c = Canvas(160,120,'test_raytrace')
c = Canvas(320,240,'test_raytrace')
# c = Canvas(640,480,'test_raytrace_big')
s = Scene()
s.addLight(Point(30, 30, 10))
s.addLight(Point(-10, 100, 30))
s.lookAt(Point(0, 2, 0))
obj = Sphere(Point(1,3,-10), 2)
surf = SimpleSurface((1.0,1.0,0.0))
s.addObject(obj.normalAt, obj.intersectionTime, surf.colourAt)
for y in range(6):
obj = Sphere(Point(-3 - y * 0.4, 2.3, -5), 0.4)
surf = SimpleSurface((y / 6.0, 1 - y / 6.0, 0.5))
s.addObject(obj.normalAt, obj.intersectionTime, surf.colourAt)
obj = Halfspace(Point(0,0,0), VUP)
surf = CheckerboardSurface()
s.addObject(obj.normalAt, obj.intersectionTime, surf.colourAt)
s.render(c)
def main(n):
import time
times = []
for i in range(n):
t1 = time.time()
_main()
t2 = time.time()
times.append(t2 - t1)
return times
main(1)
```
#### File: test/integration/pytest_test.py
```python
import os, sys, subprocess, shutil
sys.path.append(os.path.dirname(__file__) + "/../lib")
from test_helper import create_virtenv, run_test
ENV_NAME = "pytest_test_env_" + os.path.basename(sys.executable)
ENV_DIR = os.path.abspath(ENV_NAME)
SRC_DIR = os.path.abspath(os.path.join(ENV_NAME, "src"))
PYTHON_EXE = os.path.abspath(os.path.join(ENV_NAME, "bin", "python"))
pkg = ["pytest==2.8.2"]
create_virtenv(ENV_NAME, pkg)
PYTEST_DIR = os.path.abspath(os.path.join(SRC_DIR, "pytest"))
test_dir = os.path.join(ENV_DIR, "tests")
if not os.path.exists(test_dir):
os.mkdir(test_dir)
with open(os.path.join(test_dir, "test_foo.py"), 'w') as f:
f.write("""
import pytest
@pytest.mark.skipif(True, reason="for fun")
def test_skipif_true():
1/0
""")
subprocess.check_call([os.path.join(ENV_DIR, "bin", "py.test"), test_dir])
# subprocess.check_call(["gdb", "--args", PYTHON_EXE, "-m", "pytest", test_dir])
```
#### File: test/tests/builtins.py
```python
import sys
__builtins__.aoeu = 1
print aoeu
__builtins__.True = 2
print True
print bool(1)
print bool(1) is True
__builtins__.__builtins__ = 1
print __builtins__
__builtins__ = 2
print __builtins__
import builtins_getitem
print all([]), all([True]), all([False]), all([None]), all([True, False, None])
print any([]), any([True]), any([False]), any([None]), any([True, False, None])
print sum(range(5))
print sum(range(5), 5)
class C(object):
def __init__(self, n):
self.n = n
def __add__(self, rhs):
self.n = (self.n, rhs.n)
return self
print sum([C(1), C(2), C(3)], C(4)).n
print zip()
print zip([1, 2, 3, 0], ["one", "two", "three"])
print zip([1, 2, 3, 0], ["one", "two", "three"], ["uno", "dos", "tres", "quatro"])
print filter(lambda x: x % 2, xrange(20))
print type(enumerate([]))
print list(enumerate(xrange(5, 10)))
print list(enumerate(start=-42, sequence=xrange(5, 10)))
print list(enumerate(range(3), 2**128)) # tests long
print list(enumerate(range(3), 2**63-1)) # tests start with int and then switch to long
# If the first argument is None, filter checks for truthiness (i.e. it is equivalent to passing 'bool')
print filter(None, xrange(-5, 5))
print filter(None, unicode("12"))
print isinstance(1, int)
print isinstance(1, (float, int))
print isinstance(1, (float, (), (int, 3), 4))
print pow(11, 42)
print pow(11, 42, 75)
print divmod(5, 2)
print divmod(5L, -2)
try:
divmod(1, "")
except TypeError, e:
print e
def G():
yield "A"; yield "B"; yield "C"
print list(enumerate(G()))
print next(iter([]), "default")
print next(iter([]), None)
print next(iter([1]), "default")
class C(object):
def __init__(self):
self.a = 1
print vars(C()).items()
try:
print vars(42)
except TypeError, e:
print e
print globals().get("not a real variable")
print globals().get("not a real variable", 1)
print globals().has_key("C"), globals().has_key("CC")
print hex(12345)
print oct(234)
print hex(0)
print oct(0) # This should not add an additional leading 0, ie should return "0" not "00"
print abs((-sys.maxint)-1)
try:
print hex([])
except TypeError, e:
print e
class Iterable(object):
def __iter__(self):
return self
def next(self):
return 1
i = Iterable()
it = iter(i)
print it is i
# check that builtins don't bind
class C(object):
s = sorted
c = C()
print c.s([3,2,1])
l = range(5)
print sorted(l, key=lambda x:-x)
print l
print bytes
print bytes is str
print repr(b'1234')
print callable(1)
print callable(int)
print callable(lambda: 1)
print range(5L, 7L)
for n in [0, 1, 2, 3, 4, 5]:
print round(-1.1, n), round(-1.9, n), round(0.5, n), round(-0.5, n), round(-0.123456789, n), round(1, n)
print list(iter(xrange(100).__iter__().next, 20))
print bytearray(xrange(256))
l = [2, 1, 3]
print apply(sorted, [l])
print apply(sorted, [l], { "reverse" : True })
print format(5.0, '+')
print format(5.011111111111, '+.6')
print format("abc", '')
print format(0, str(10))
print '{n}'.format(n=None)
print hash(1L)
class C(long):
def __hash__(self):
return self
print hash(2L)
try:
print hash({})
except TypeError as e:
print e
try:
print hash(set())
except TypeError as e:
print e
# Thankfully, setting __builtins__ has no effect:
__builtins__ = {'zzz': 2}
try:
print zzz
assert 0
except NameError as e:
print "caught NameError"
```
#### File: test/tests/compare_order.py
```python
class A(object):
def __eq__(self, rhs):
return True
class B(object):
def __eq__(self, lhs):
return False
print A() == B()
print B() == A()
print A() in [B()]
print B() in [A()]
print A() in (B(),)
print B() in (A(),)
print A() in {B(): 1}
print B() in {A(): 1}
print A() in {B()}
print B() in {A()}
```
#### File: test/tests/compile_test2.py
```python
import unittest
from test import test_support
class TestSpecifics(unittest.TestCase):
def test_exec_functional_style(self):
# Exec'ing a tuple of length 2 works.
g = {'b': 2}
exec("a = b + 1", g)
self.assertEqual(g['a'], 3)
# As does exec'ing a tuple of length 3.
l = {'b': 3}
g = {'b': 5, 'c': 7}
exec("a = b + c", g, l)
self.assertNotIn('a', g)
self.assertEqual(l['a'], 10)
# Tuples not of length 2 or 3 are invalid.
with self.assertRaises(TypeError):
exec("a = b + 1",)
with self.assertRaises(TypeError):
exec("a = b + 1", {}, {}, {})
# Can't mix and match the two calling forms.
g = {'a': 3, 'b': 4}
l = {}
with self.assertRaises(TypeError):
exec("a = b + 1", g) in g
with self.assertRaises(TypeError):
exec("a = b + 1", g, l) in g, l
def test_exec_with_general_mapping_for_locals(self):
class M:
"Test mapping interface versus possible calls from eval()."
def __getitem__(self, key):
if key == 'a':
return 12
raise KeyError
def __setitem__(self, key, value):
self.results = (key, value)
def keys(self):
return list('xyz')
m = M()
g = globals()
exec 'z = a' in g, m
self.assertEqual(m.results, ('z', 12))
try:
exec 'z = b' in g, m
except NameError:
pass
else:
self.fail('Did not detect a KeyError')
exec 'z = dir()' in g, m
self.assertEqual(m.results, ('z', list('xyz')))
exec 'z = globals()' in g, m
self.assertEqual(m.results, ('z', g))
exec 'z = locals()' in g, m
self.assertEqual(m.results, ('z', m))
try:
exec 'z = b' in m
except TypeError:
pass
else:
self.fail('Did not validate globals as a real dict')
class A:
"Non-mapping"
pass
m = A()
try:
exec 'z = a' in g, m
except TypeError:
pass
else:
self.fail('Did not validate locals as a mapping')
# Verify that dict subclasses work as well
class D(dict):
def __getitem__(self, key):
if key == 'a':
return 12
return dict.__getitem__(self, key)
d = D()
exec 'z = a' in g, d
self.assertEqual(d['z'], 12)
def test_unicode_encoding(self):
code = u"# -*- coding: utf-8 -*-\npass\n"
self.assertRaises(SyntaxError, compile, code, "tmp", "exec")
def test_main():
test_support.run_unittest(TestSpecifics)
if __name__ == "__main__":
# pyston change: remove duration in test output
# test_main()
import sys, StringIO, re
orig_stdout = sys.stdout
out = StringIO.StringIO()
sys.stdout = out
test_main()
sys.stdout = orig_stdout
print re.sub(" [.0-9]+s", " TIME", out.getvalue())
```
#### File: test/tests/cpython_oldstyle_getattr_crash.py
```python
class C:
def __getattr__(self, attr):
del self
print "C.__getattr__", attr
del D.__get__
raise AttributeError("our attribute error")
class D(object):
__get__ = C()
class E(object):
x = D()
try:
print E().x
except Exception as e:
print e
```
#### File: test/tests/dash_c.py
```python
import sys
import subprocess
me = sys.executable
with open('/dev/null') as ignore:
# We don't (yet?) require exact stderr or return code compatibility w/
# python. So we just check that we succeed or fail as appropriate.
def run(args):
code = 0 == subprocess.call([me] + args, stderr=ignore)
sys.stdout.flush()
print code
sys.stdout.flush()
run(["-Sc", "print 2 + 2"])
run(["-Sc", "import sys; print sys.argv", "hello", "world"])
run(["-Sc", "import sys; print sys.argv", "-c", "this is ignored"])
run(["-Sc"])
run(["-Sc", "-c"])
run(["-Sc", "this should not work"])
run(["-Sc", ";"])
run(["-Scprint 1"])
```
#### File: test/tests/deopt_namescope_tests.py
```python
try:
import __pyston__
__pyston__.setOption("OSR_THRESHOLD_BASELINE", 50)
__pyston__.setOption("REOPT_THRESHOLD_BASELINE", 50)
__pyston__.setOption("OSR_THRESHOLD_INTERPRETER", 50)
__pyston__.setOption("REOPT_THRESHOLD_INTERPRETER", 50)
__pyston__.setOption("SPECULATION_THRESHOLD", 10)
except ImportError:
pass
# This test makes sure that the boxedLocals survive a deopt.
# TODO Write a test case to make sure exc_info survives the deopt.
def f_with_name_scoping(o):
print "starting f"
exec "k = 5"
l = 6
try:
print o.a
if o.b:
raise Exception('')
except Exception, e:
print o.c
print e
print o.d
print sorted(locals().items())
print "k =", k
print l
print "Done"
def main():
class C(object):
def __repr__(self):
return "<C>"
c = C()
c.a = 1
c.b = 0
c.c = 3
c.d = 4
for i in xrange(300):
print i
if i == 60:
c.a = []
if i == 120:
c.b = 1
if i == 180:
c.c = []
if i == 240:
c.b = 0
c.d = 1.0
f_with_name_scoping(c)
main()
```
#### File: test/tests/dunder_descriptors.py
```python
def f1():
class D(object):
def __init__(self, n):
self.n = n
def __get__(self, obj, cls):
print "__get__()", obj is None, self.n
def desc(*args):
print "desc()", len(args)
return self.n
return desc
def __call__(self):
print "D.call"
return self.n
class C(object):
__hash__ = D(1)
__add__ = D(2)
__init__ = D(None)
print C.__init__()
c = C()
print C.__hash__()
print c.__hash__()
print hash(c)
print c + c
f1()
def f2():
print "\nf2"
class D(object):
def __call__(self, subcl):
print "call", subcl
return object.__new__(subcl)
def get(self, inst, owner):
print "__get__", inst, owner
def new(self):
print "new"
return object.__new__(owner)
return new
class C(object):
__new__ = D()
print type(C())
D.__get__ = get
print type(C())
f2()
def f3():
print "\nf3"
class D(object):
def __call__(self):
print "call"
return None
def get(self, inst, owner):
print "__get__", type(inst), owner
def init():
print "init"
return None
return init
class C(object):
__init__ = D()
print type(C())
D.__get__ = get
print type(C())
f3()
# misc tests:
import sys
sys.getrecursionlimit.__call__.__call__.__call__()
TypeError.__call__.__call__.__call__()
```
#### File: test/tests/finalizer_cycle.py
```python
import gc
finalized_at_least_once = False
class ObjWithFinalizerAndRef(object):
def __init__(self, index):
self.index = index
self.ref = None
def __del__(self):
global finalized_at_least_once
finalized_at_least_once = True
items_in_list = 100
# Make a lot of cycles
for _ in xrange(100):
# Create a finalizer cycle. We should break those arbitrarily.
objs = [ObjWithFinalizerAndRef(i) for i in xrange(items_in_list)]
for i in xrange(items_in_list):
objs[i].ref = objs[(i+1) % items_in_list]
gc.collect()
print "finished"
if not finalized_at_least_once:
raise Exception("should gc at least once - consider creating more cycles?")
```
#### File: test/tests/generator_abandonment.py
```python
print any(i == 5 for i in xrange(10))
# TODO: move this back to nonzero_exceptions when it's working again:
class MyException(Exception):
pass
class C(object):
def __init__(self, x):
self.x = x
def __nonzero__(self):
raise MyException(self.x)
def __repr__(self):
return "<C %r>" % self.x
try:
print list(1 for i in range(5) if C(7))
except MyException, e:
print e
```
#### File: test/tests/generator_cycle2.py
```python
import gc
def f(z):
l = ((lambda x, l: x**y)(z, l) for y in xrange(10))
return l
def test():
g = f(4)
print g.next()
return g
g = test()
print g.next()
gc.collect()
print gc.garbage
```
#### File: test/tests/generator_recursion_checking.py
```python
def test(n):
l = []
for i in xrange(n):
g = (i for i in xrange(5))
g.next()
l.append(g)
for i in xrange(3):
test(3500)
```
#### File: test/tests/large_dict.py
```python
def f():
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '
'TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES BACKQUOTES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '
'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '
'SPECIALMETHODS'),
'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'COERCIONS': ('coercion-rules','CONVERSIONS'),
'CONVERSIONS': ('conversions', 'COERCIONS'),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '
'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '
'ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'PRINTING': 'print',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
for i in xrange(100):
f()
```
#### File: test/tests/metaclasses.py
```python
class MM(type):
def __new__(*args):
print "MM.__new__", args[:3]
return type.__new__(*args)
def __call__(*args):
print "MM.__call__", args[:3]
return type.__call__(*args)
print "Made MM", type(MM)
class M(type):
__metaclass__ = MM
def __new__(*args):
print "M.__new__", args[:3]
return type.__new__(*args)
def __call__(*args):
print "M.__call__", args[:3]
return type.__call__(*args)
print "Made M", type(M)
class C(object):
__metaclass__ = M
print "Made C", type(C)
print isinstance(C, M)
print isinstance(C, type)
print isinstance(C, int)
def f(*args):
print "f()", args[:2]
class C(object):
# Metaclasses don't need to be type objects:
__metaclass__ = f
print C
print type.__call__(int, 1)
try:
type.__new__(1, 2, 3)
except TypeError, e:
print e
try:
type.__new__(int, 1, 2, 3)
except TypeError, e:
print e
class D():
__metaclass__ = type
print D
print D.__base__
print type("test", (), {})
print type("test", (), {"__module__":"fake"})
# test non str attr keys
t = type("test", (), {u"test" : 2, 1000L : 3, 1.0 : 4})
print t.__dict__[u"test"], t.test
print t.__dict__[1000L]
print t.__dict__[1.0]
```
#### File: test/tests/oargs_decref.py
```python
def f(a, b, c, d, e, *args):
print a, b, c, d, e, args
1/a
for i in xrange(-100, 100):
try:
f(i, 0, 0, 0, 0, 0)
except ZeroDivisionError:
pass
```
#### File: test/tests/osr_into_failed_speculation.py
```python
def xrange(n):
return n
def f(x):
y = xrange(x)
n = 10000
while n:
n -= 1
print y
print y + 1
f(11)
f(10)
```
#### File: test/tests/property.py
```python
class C(object):
def fget(self):
return 5
def fset(self, val):
print 'in fset, val =', val
x = property(fget, fset, None, "Doc String")
c = C()
print c.x
print C.x.__get__(c, C)
print type(C.x.__get__(None, C))
c.x = 7
print c.x
print C.x.__doc__
class C2(object):
@property
def x(self):
print "x1"
return 2
x1 = x
@x.setter
def x(self, value):
print "x2"
return 3
x2 = x
@x.deleter
def x(self):
print "x3"
c = C2()
print "These should all succeed:"
print c.x1
print c.x2
print c.x
try:
# This will fail since x1 is a copy that didn't have the setter set:
c.x1 = 1
except AttributeError, e:
print e
c.x2 = 1
c.x = 1
try:
# This will fail since x1 is a copy that didn't have the deleter set:
del c.x1
except AttributeError, e:
print e
try:
# This will fail since x1 is a copy that didn't have the deleter set:
del c.x2
except AttributeError, e:
print e
c.x = 1
class MyProperty(property):
pass
class C(object):
v = "empty"
@MyProperty
def p(self):
print "get"
return self.v
@p.setter
def p(self, value):
print "set"
self.v = "it " + value
c = C()
c.p = "worked"
print c.p
print 'test the setting of __doc__'
class C(object):
@property
def f(self):
"""doc string of f"""
print C.f.__doc__
print 'test the setting of __doc__ with a __get__'
class Desc(object):
def __get__(self, obj, typ):
print 'desc called'
return "blah"
class ObjWithDocDesc(object):
__doc__ = Desc()
class C(object):
f = property(ObjWithDocDesc)
print C.f.__doc__
print 'test the setting of __doc__ with a __get__ throwing an exception (should get swallowed)'
class Desc(object):
def __get__(self, obj, typ):
raise ValueError("arbitrary exception")
class ObjWithDocDesc(object):
__doc__ = Desc()
class C(object):
f = property(ObjWithDocDesc)
print C.f.__doc__
print 'test the setting of __doc__ with a __get__ throwing an exception (should not get swallowed)'
class Desc(object):
def __get__(self, obj, typ):
raise BaseException("not a subclass of Exception")
class ObjWithDocDesc(object):
__doc__ = Desc()
try:
class C(object):
f = property(ObjWithDocDesc)
except BaseException as e:
print e.message
print 'test the setting of a __doc__ when you copy it'
class Desc(object):
def __get__(self, obj, typ):
print 'desc called'
return "blah"
class ObjWithDocDesc(object):
__doc__ = Desc()
prop = property(ObjWithDocDesc)
print 'made prop'
print prop.__doc__
def g():
"""doc of g"""
return 5
prop2 = prop.getter(g)
print 'made prop2'
print prop2.__doc__
prop3 = prop.setter(lambda self, val : None)
print prop3.__doc__
prop4 = prop.deleter(lambda self, val : None)
print prop4.__doc__
print 'test the setting of a __doc__ when you copy it when using a subclass of property'
class PropertySubclass(property):
pass
class Desc(object):
def __get__(self, obj, typ):
print 'desc called'
return "blah"
class ObjWithDocDesc(object):
__doc__ = Desc()
prop = PropertySubclass(ObjWithDocDesc)
print 'made prop'
print prop.__doc__
def g():
"""doc of g"""
return 5
prop2 = prop.getter(g)
print 'made prop2'
print prop2.__doc__
prop3 = prop.setter(lambda self, val : None)
print prop3.__doc__
prop4 = prop.deleter(lambda self, val : None)
print prop4.__doc__
```
#### File: test/tests/pyframe_new_test.py
```python
import ctypes
def f():
pass
ctypes.pythonapi.PyFrame_New.restype = ctypes.py_object
ctypes.pythonapi.PyThreadState_Get.restype = ctypes.c_void_p
f = ctypes.pythonapi.PyFrame_New(
ctypes.c_void_p(ctypes.pythonapi.PyThreadState_Get()),
ctypes.py_object(f.func_code),
ctypes.py_object({'globals': True}),
ctypes.c_long(0)
)
print f.f_locals
```
#### File: test/tests/resurrection.py
```python
x = None
running = True
class C(object):
def __init__(self):
self.n = 0
def __del__(self):
if running:
global x
self.n += 1
print "__del__ #%d" % self.n
x = self
import gc
x = C()
for i in xrange(100):
x = None
gc.collect()
# print x
running = False
```
#### File: test/tests/setattr_patching_under.py
```python
class C(object):
pass
def set(o, a):
o.x = a
class D(object):
def __del__(self):
print "in __del__"
c = C()
c.a = 1
c.b = 2
c.c = 3
c.d = 4
c.e = 5
c.f = 6
c.g = 7
c.h = 8
set(c, 1)
print "done with __del__"
c = C()
# The first set() just adds the attribute
print 1
set(c, 1)
# The second set() rewrites the setattr to be a in-place set, and also adds the D object
print 2
set(c, D())
# This third set() will remove the D() object, so by the time the set() finishes,
# the patchpoint could be rewritten due to the set() in the D.__del__ destructor
print 3
set(c, 1)
print 4
```
#### File: test/tests/set.py
```python
s1 = {1, 1}
def sorted(s):
l = list(s)
l.sort()
return repr(l)
s1 = set() | set(range(3))
print sorted(s1)
s2 = set(range(1, 5))
print sorted(s2)
print repr(sorted(s1)), str(sorted(s1))
print sorted(s1 - s2)
print sorted(s2 - s1)
print sorted(s1 ^ s2)
print sorted(s1 & s2)
print sorted(s1 | s2)
print len(set(range(5)))
s = set(range(5))
print sorted(s)
s.add(3)
print sorted(s)
s.add("")
print len(s)
s.add(None)
print len(s)
print set([1])
for i in set([1]):
print i
s = frozenset(range(5))
print len(s)
print sorted(s)
print frozenset()
print hasattr(s, "remove")
print hasattr(s, "add")
print frozenset() | frozenset()
print set() | frozenset()
print frozenset() | set()
print set() | set()
for i in xrange(8):
print i, i in set(range(2, 5))
print i, i in frozenset(range(2, 5))
s = set(range(5))
print len(s)
s.clear()
print s
s.update((10, 15))
print sorted(s)
s.update((10, 15), range(8))
print sorted(s)
s.remove(6)
print sorted(s)
try:
s.remove(6)
except KeyError, e:
print e
def f2():
print {5}
f2()
s = set([])
s2 = s.copy()
s.add(1)
print s, s2
s1 = set([3, 5])
s2 = set([1, 5])
print sorted(s1.union(s2)), sorted(s1.intersection(s2))
print sorted(s1.union(range(5, 7))), sorted(s1.intersection(range(5, 7)))
print sorted(s2.union([], [], [], [])), sorted(s2.intersection())
s = frozenset([1, 5])
d = s.difference([1], [1], [2])
print d, len(s)
print
l = []
s = set(range(5))
while s:
l.append(s.pop())
l.sort()
print l
s = set([1])
s.discard(1)
print s
s.discard(1)
print s
s = set(range(10))
print s.difference_update(range(-3, 2), range(7, 23))
print sorted(s)
# Check set subclassing:
class MySet(set):
pass
class MyFrozenset(frozenset):
pass
s = s1 = set()
s |= MySet(range(2))
print sorted(s), sorted(s1)
s &= MySet(range(1))
print sorted(s), sorted(s1)
s ^= MySet(range(4))
print sorted(s), sorted(s1)
s -= MySet(range(3))
print sorted(s), sorted(s1)
try:
set() | range(5)
assert 0
except TypeError as e:
print e
compare_to = []
for i in xrange(10):
compare_to.append(set(range(i)))
compare_to.append(frozenset(range(i)))
compare_to.append(MySet(range(i)))
compare_to.append(MyFrozenset(range(i)))
compare_to.append(range(i))
compare_to.append(range(i, 10))
compare_to.append([0, 0, 1, 1])
for s1 in set(range(5)), frozenset(range(5)):
for s2 in compare_to:
print type(s2), sorted(s2), s1.issubset(s2), s1.issuperset(s2), sorted(s1.difference(s2)), s1.isdisjoint(s2), sorted(s1.union(s2)), sorted(s1.intersection(s2)), sorted(s1.symmetric_difference(s2))
print s1 == s2, s1 != s2
try:
print s1 < s2, s1 <= s2, s1 > s2, s1 >= s2
except Exception as e:
print e
f = float('nan')
s = set([f])
print f in s, f == list(s)[0]
for fn in (set.intersection_update, set.difference_update, set.symmetric_difference_update, set.__sub__,
set.__or__, set.__xor__, set.__and__):
s1 = set([3, 5])
s2 = set([1, 5])
r = fn(s1, s2)
if r:
print r,
print sorted(s1), sorted(s2)
def test_set_creation(base):
print "Testing with base =", base
# set.__new__ should not iterate through the argument.
# sqlalchemy overrides init and expects to be able to do the iteration there.
def g():
for i in xrange(5):
print "iterating", i
yield i
print "Calling __new__:"
s = base.__new__(base, g())
print "Calling __init__:"
s.__init__(g())
print "Trying subclassing"
class MySet(base):
def __new__(cls, g):
print "starting new"
r = base.__new__(cls, g)
print "ending new"
return r
def __init__(self, g):
print "starting init"
print list(g)
print MySet(g())
test_set_creation(set)
test_set_creation(frozenset)
set(**{})
try:
set(**dict(a=1))
except TypeError:
print "TypeError"
class MySet(set):
def __new__(cls, *args, **kwargs):
return set.__new__(cls, *args)
try:
MySet(a=1)
except TypeError as e:
print(e.message)
class SetSubclassWithKeywordArgs(set):
def __init__(self, iterable=[], newarg=None):
set.__init__(self, iterable)
SetSubclassWithKeywordArgs(newarg=1)
try:
frozenset(a=1)
except TypeError as e:
print(e.message)
class MyFrozenSet(frozenset):
def __new__(cls, *args, **kwargs):
return frozenset.__new__(cls, *args)
MyFrozenSet(a=1)
class FrozensetSubclassWithKeywordArgs(frozenset):
def __init__(self, iterable=[], newarg=None):
frozenset.__init__(self, iterable)
FrozensetSubclassWithKeywordArgs(newarg=1)
print(set() in frozenset([frozenset()]))
class MySet(set):
def __hash__(self):
print("calling __hash__")
return id(self)
print("Ready")
foo = MySet()
a = set()
a.add(foo)
print(a.remove(foo))
print(foo in set())
# Remove an item using a different key:
s = set()
s.add(1)
s.remove(1L)
s = set([1, 2, 3, 4])
s2 = set([3L, 4L, 5L, 6L])
s.symmetric_difference_update(s2)
# make sure we are inserting the tuple elements in reverse:
print {1, 1L}, {1L, 1}, set([1, 1L]), set([1L, 1])
s = {1}
s.add(1L)
print s
from api_test import set_size
s = set([1, 2, 3, 4])
print(set_size(s))
try:
{{}}
except Exception as e:
print e
```
#### File: test/tests/signal_test.py
```python
import signal
for k in sorted(dir(signal)):
if not k.startswith("SIG"):
continue
print k, getattr(signal, k)
print hasattr(signal, "alarm")
import time
import signal
def sig_handler(signum, stack):
print "inside sig_handler"
import sys, traceback
traceback.print_stack(stack)
sys.exit(0)
def f(lst):
signal.signal(signal.SIGALRM, sig_handler)
signal.setitimer(signal.ITIMER_REAL, 2, 1)
for x in lst:
time.sleep(x) #1
time.sleep(x) #2
f([0] * 100 + [10])
assert False, "shuld not get executed"
```
#### File: test/tests/threading_local_cleanup.py
```python
import thread
import _weakref
def f():
global r
l = thread._local()
class C(object):
pass
o = C()
r = _weakref.ref(o)
l.o = o
del o
print type(r())
del l
f()
print type(r())
```
#### File: test/tests/weakrefs.py
```python
import weakref
import array
import re
# from https://docs.python.org/2/library/weakref.html:
#
# Not all objects can be weakly referenced; those objects which can include class instances, functions written in Python (but not in C), methods (both bound and unbound), sets, frozensets, file objects, generators, type objects, DBcursor objects from the bsddb module, sockets, arrays, deques, regular expression pattern objects, and code objects.
#
# Changed in version 2.4: Added support for files, sockets, arrays, and patterns.
#
# Changed in version 2.7: Added support for thread.lock, threading.Lock, and code objects
#
# Several built-in types such as list and dict do not directly support weak references but can add support through subclassing
#
# CPython implementation detail: Other built-in types such as tuple and long do not support weak references even when subclassed
#
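# Illustrative sketch of the subclassing workaround quoted above (not part of
# the original test): a plain dict rejects weak references, while a trivial
# subclass accepts them. "WeakReferenceableDict" is a hypothetical name used
# only for illustration.
#
#     class WeakReferenceableDict(dict):
#         pass
#
#     d = WeakReferenceableDict()
#     r = weakref.ref(d)   # succeeds, unlike weakref.ref({})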
def test_wr(o, extra=None):
if extra is None:
extra = type(o)
try:
r = weakref.ref(o)
print "passed", extra
return r
except:
print "failed", extra
def test_subclass_wr(tp):
class test(tp): pass
test_wr(test(), "subclass of " + repr(tp))
def test():
pass
wr = test_wr(test)
print wr() == test
print weakref.getweakrefcount(test)
test_wr(1)
test_wr(1.)
test_wr(1L)
test_wr("hello world")
test_wr([1,2,3])
test_wr((1,2,2))
test_wr(set())
test_wr(frozenset())
test_wr((i*i for i in range(1000000)))
test_wr(set)
test_wr(file("/etc/passwd"))
class Old:
pass
test_wr(Old)
# missing: db cursor from the bsddb module
# missing: sockets
test_wr(array.array('d', [1.0, 2.0, 3.14]))
test_wr(re.compile('ab*'))
# compile isn't in pyston yet
#test_wr(compile('print "Hello, world"', '<string>', 'exec'))
# missing: thread.lock, threading.Lock
# skip these since we permit them, while cpython doesn't
#test_subclass_wr(long)
#test_subclass_wr(tuple)
#test_subclass_wr(list)
test_subclass_wr(int)
test_subclass_wr(float)
``` |
{
"source": "jmg-duarte/scrape-the-box",
"score": 3
} |
#### File: commands/search/discussion.py
```python
import json
import sys
from stb.htb import db
def search(thread_id, search_term, db_name):
db.conn_use(
db_name,
db.cursor_exec(
db.cursor_fts_comments(thread_id, search_term, _search_callback)
),
)
def _search_callback(results):
json.dump(
_results_to_json_array(results), indent=" ", fp=sys.stdout,
)
def _results_to_json_array(results):
return list(
map(lambda result: {"author": result[0], "message": result[1]}, results)
)
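# Illustrative shape of the JSON emitted by _search_callback (hypothetical
# author/message values, shown only as a sketch):
#
#     [
#       {
#         "author": "some_user",
#         "message": "a comment body that matched the search term"
#       }
#     ]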
```
#### File: stb/htb/db.py
```python
import sqlite3
from typing import Iterable
from stb.htb.discussion import Discussion
from stb.htb.comment import Comment
CREATE_TRIGGER_DISCUSSIONS = """
CREATE TRIGGER IF NOT EXISTS "trigger_discussions" BEFORE
INSERT ON "discussions"
FOR EACH ROW WHEN (
NOT EXISTS(
SELECT 1 FROM discussions WHERE discussions.id IS NEW.id
)
)
BEGIN
INSERT INTO "v_discussions" VALUES (NEW.author, NEW.title);
END
"""
CREATE_TRIGGER_COMMENTS = """
CREATE TRIGGER IF NOT EXISTS "trigger_comments_{tid}" BEFORE
INSERT ON "comments"
FOR EACH ROW WHEN (
NOT EXISTS(
SELECT 1 FROM comments WHERE comments.permalink IS NEW.permalink
)
)
BEGIN
INSERT INTO "v_comments_{tid}" VALUES (NEW.author, NEW.message);
END
"""
CREATE_TABLE_DISCUSSIONS = """
CREATE TABLE IF NOT EXISTS "discussions" (
"id" INTEGER NOT NULL UNIQUE,
"author" TEXT NOT NULL,
"permalink" TEXT NOT NULL,
"title" TEXT NOT NULL,
PRIMARY KEY("id") ON CONFLICT IGNORE
)
"""
CREATE_TABLE_COMMENTS = """
CREATE TABLE IF NOT EXISTS "comments" (
"discussion_id" INTEGER NOT NULL,
"author" TEXT NOT NULL,
"message" TEXT NOT NULL,
"permalink" TEXT NOT NULL UNIQUE,
"datetime" TEXT NOT NULL,
PRIMARY KEY("permalink")
)
"""
CREATE_VIRTUAL_TABLE_DISCUSSIONS = """
CREATE VIRTUAL TABLE IF NOT EXISTS "v_discussions"
USING fts5(author, title);
"""
CREATE_VIRTUAL_TABLE_COMMENTS = """
CREATE VIRTUAL TABLE IF NOT EXISTS "v_comments_{tid}"
USING fts5(author, message);
"""
INSERT_INTO_DISCUSSIONS = """
INSERT INTO "discussions" (
"id",
"author",
"permalink",
"title"
) VALUES (?, ?, ?, ?)
ON CONFLICT DO NOTHING;
"""
INSERT_INTO_COMMENTS = """
INSERT INTO "comments" (
"discussion_id",
"permalink",
"datetime",
"author",
"message"
) VALUES (?, ?, ?, ?, ?)
ON CONFLICT DO NOTHING;
"""
INSERT_INTO_VIRTUAL_DISCUSSIONS = """
INSERT INTO "v_discussions" (
"author",
"title"
) VALUES (?, ?);
"""
INSERT_INTO_VIRTUAL_COMMENTS = """
INSERT INTO "v_comments_{}" (
"author",
"message"
) VALUES (?, ?);
"""
SELECT_DISCUSSIONS = """
SELECT *
FROM "v_discussions"
WHERE title MATCH ?
ORDER BY rank
"""
SELECT_COMMENTS = """
SELECT *
FROM "v_comments_{tid}"
WHERE message MATCH ?
ORDER BY rank
"""
def _get_runnable(f):
def _f_with_parameters(*args, **kwargs):
def _using_required(required):
f(required, *args, **kwargs)
return _using_required
return _f_with_parameters
def conn_use(db_name, *runnables):
with sqlite3.connect(db_name) as conn:
for runnable in runnables:
runnable(conn)
conn.commit()
# conn.close()
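# Illustrative sketch (not part of the original module) of how the
# _get_runnable-decorated helpers below compose with conn_use: each helper
# call returns a deferred runnable that conn_use later feeds the open
# connection. "htb.db" and `discussions` are hypothetical placeholder values.
#
#     conn_use(
#         "htb.db",
#         cursor_exec(
#             cursor_create_discussions(),
#             cursor_insert_discussions(discussions),
#         ),
#     )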
@_get_runnable
def load_fts(conn):
conn.enable_load_extension(True)
conn.load_extension("/usr/local/lib/stb/fts5")
conn.enable_load_extension(False)
@_get_runnable
def cursor_exec(conn, *runnables):
cursor = conn.cursor()
# TODO maybe return a list of runnable results?
# TODO maybe create another method
for r in runnables:
r(cursor)
@_get_runnable
def cursor_create_discussions(cursor):
cursor.execute(CREATE_TABLE_DISCUSSIONS)
cursor.execute(CREATE_VIRTUAL_TABLE_DISCUSSIONS)
cursor.execute(CREATE_TRIGGER_DISCUSSIONS)
@_get_runnable
def cursor_create_comments(cursor, discussion_id):
cursor.execute(CREATE_TABLE_COMMENTS)
cursor.execute(CREATE_VIRTUAL_TABLE_COMMENTS.format(tid=discussion_id))
cursor.execute(CREATE_TRIGGER_COMMENTS.format(tid=discussion_id))
@_get_runnable
def cursor_insert_comments(cursor, comments: Iterable[Comment]):
comments = map(lambda c: c.as_tuple(), comments)
cursor.executemany(INSERT_INTO_COMMENTS, comments)
@_get_runnable
def cursor_insert_discussions(cursor, discussions: Iterable[Discussion]):
discussions = map(lambda d: d.as_tuple(), discussions)
cursor.executemany(INSERT_INTO_DISCUSSIONS, discussions)
@_get_runnable
def cursor_insert_virtual_discussions(cursor, discussions: Iterable[Discussion]):
discussions = map(lambda d: (d.author, d.title), discussions)
# cursor.executemany(INSERT_INTO_VIRTUAL_DISCUSSIONS, discussions)
@_get_runnable
def cursor_insert_virtual_comments(cursor, discussion_id, comments: Iterable[Comment]):
comments = map(lambda d: (d.author, d.message), comments)
# cursor.executemany(INSERT_INTO_VIRTUAL_COMMENTS.format(discussion_id), comments)
@_get_runnable
def cursor_fts_discussions(cursor, search_term, callback):
cursor.execute(
SELECT_DISCUSSIONS, (search_term,),
)
callback(cursor.fetchall())
@_get_runnable
def cursor_fts_comments(cursor, thread_id, search_term, callback):
try:
cursor.execute(
SELECT_COMMENTS.format(tid=thread_id), (search_term,),
)
except sqlite3.OperationalError as e:
print(
f"Thread {thread_id} was not found.\nHave you tried downloading it with:\n\n\tstb fetch thread {thread_id} --db"
)
return
callback(cursor.fetchall())
``` |
{
"source": "jmg-duarte/sqlalchemy",
"score": 2
} |
#### File: sqlalchemy/sql/visitors.py
```python
from __future__ import annotations
from collections import deque
from enum import Enum
import itertools
import operator
import typing
from typing import Any
from typing import Callable
from typing import cast
from typing import ClassVar
from typing import Collection
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import overload
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
from .. import exc
from .. import util
from ..util import langhelpers
from ..util._has_cy import HAS_CYEXTENSION
from ..util.typing import Literal
from ..util.typing import Protocol
from ..util.typing import Self
if typing.TYPE_CHECKING or not HAS_CYEXTENSION:
from ._py_util import prefix_anon_map as prefix_anon_map
from ._py_util import cache_anon_map as anon_map
else:
from sqlalchemy.cyextension.util import prefix_anon_map as prefix_anon_map
from sqlalchemy.cyextension.util import cache_anon_map as anon_map
__all__ = [
"iterate",
"traverse_using",
"traverse",
"cloned_traverse",
"replacement_traverse",
"Visitable",
"ExternalTraversal",
"InternalTraversal",
"anon_map",
]
class _CompilerDispatchType(Protocol):
def __call__(_self, self: Visitable, visitor: Any, **kw: Any) -> Any:
...
class Visitable:
"""Base class for visitable objects.
:class:`.Visitable` is used to implement the SQL compiler dispatch
functions. Other forms of traversal such as for cache key generation
are implemented separately using the :class:`.HasTraverseInternals`
interface.
.. versionchanged:: 2.0 The :class:`.Visitable` class was named
:class:`.Traversible` in the 1.4 series; the name is changed back
to :class:`.Visitable` in 2.0 which is what it was prior to 1.4.
Both names remain importable in both 1.4 and 2.0 versions.
"""
__slots__ = ()
__visit_name__: str
_original_compiler_dispatch: _CompilerDispatchType
if typing.TYPE_CHECKING:
def _compiler_dispatch(self, visitor: Any, **kw: Any) -> str:
...
def __init_subclass__(cls) -> None:
if "__visit_name__" in cls.__dict__:
cls._generate_compiler_dispatch()
super().__init_subclass__()
@classmethod
def _generate_compiler_dispatch(cls) -> None:
visit_name = cls.__visit_name__
if "_compiler_dispatch" in cls.__dict__:
# class has a fixed _compiler_dispatch() method.
# copy it to "original" so that we can get it back if
# sqlalchemy.ext.compiles overrides it.
cls._original_compiler_dispatch = cls._compiler_dispatch
return
if not isinstance(visit_name, str):
raise exc.InvalidRequestError(
f"__visit_name__ on class {cls.__name__} must be a string "
"at the class level"
)
name = "visit_%s" % visit_name
getter = operator.attrgetter(name)
def _compiler_dispatch(
self: Visitable, visitor: Any, **kw: Any
) -> str:
"""Look for an attribute named "visit_<visit_name>" on the
visitor, and call it with the same kw params.
"""
try:
meth = getter(visitor)
except AttributeError as err:
return visitor.visit_unsupported_compilation(self, err, **kw) # type: ignore # noqa: E501
else:
return meth(self, **kw) # type: ignore # noqa: E501
cls._compiler_dispatch = ( # type: ignore
cls._original_compiler_dispatch
) = _compiler_dispatch
def __class_getitem__(cls, key: str) -> Any:
# allow generic classes in py3.9+
return cls
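# Illustrative sketch (not from the original source): a subclass that sets
# ``__visit_name__ = "widget"`` gets a generated _compiler_dispatch() that
# resolves ``visitor.visit_widget`` and calls it as
# ``visitor.visit_widget(element, **kw)``. "Widget"/"widget" are hypothetical
# names used only for illustration.
#
#     class Widget(Visitable):
#         __visit_name__ = "widget"
#
#     # compiling a Widget instance ends up invoking compiler.visit_widget(obj)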
class InternalTraversal(Enum):
r"""Defines visitor symbols used for internal traversal.
The :class:`.InternalTraversal` class is used in two ways. One is that
it can serve as the superclass for an object that implements the
various visit methods of the class. The other is that the symbols
themselves of :class:`.InternalTraversal` are used within
the ``_traverse_internals`` collection. Such as, the :class:`.Case`
object defines ``_traverse_internals`` as ::
_traverse_internals = [
("value", InternalTraversal.dp_clauseelement),
("whens", InternalTraversal.dp_clauseelement_tuples),
("else_", InternalTraversal.dp_clauseelement),
]
Above, the :class:`.Case` class indicates its internal state as the
attributes named ``value``, ``whens``, and ``else_``. They each
link to an :class:`.InternalTraversal` method which indicates the type
of data structure referred towards.
Using the ``_traverse_internals`` structure, objects of type
:class:`.InternalTraversible` will have the following methods automatically
implemented:
* :meth:`.HasTraverseInternals.get_children`
* :meth:`.HasTraverseInternals._copy_internals`
* :meth:`.HasCacheKey._gen_cache_key`
Subclasses can also implement these methods directly, particularly for the
:meth:`.HasTraverseInternals._copy_internals` method, when special steps
are needed.
.. versionadded:: 1.4
"""
dp_has_cache_key = "HC"
"""Visit a :class:`.HasCacheKey` object."""
dp_has_cache_key_list = "HL"
"""Visit a list of :class:`.HasCacheKey` objects."""
dp_clauseelement = "CE"
"""Visit a :class:`_expression.ClauseElement` object."""
dp_fromclause_canonical_column_collection = "FC"
"""Visit a :class:`_expression.FromClause` object in the context of the
``columns`` attribute.
The column collection is "canonical", meaning it is the originally
defined location of the :class:`.ColumnClause` objects. Right now
this means that the object being visited is a
:class:`_expression.TableClause`
or :class:`_schema.Table` object only.
"""
dp_clauseelement_tuples = "CTS"
"""Visit a list of tuples which contain :class:`_expression.ClauseElement`
objects.
"""
dp_clauseelement_list = "CL"
"""Visit a list of :class:`_expression.ClauseElement` objects.
"""
dp_clauseelement_tuple = "CT"
"""Visit a tuple of :class:`_expression.ClauseElement` objects.
"""
dp_executable_options = "EO"
dp_with_context_options = "WC"
dp_fromclause_ordered_set = "CO"
"""Visit an ordered set of :class:`_expression.FromClause` objects. """
dp_string = "S"
"""Visit a plain string value.
Examples include table and column names, bound parameter keys, special
keywords such as "UNION", "UNION ALL".
The string value is considered to be significant for cache key
generation.
"""
dp_string_list = "SL"
"""Visit a list of strings."""
dp_anon_name = "AN"
"""Visit a potentially "anonymized" string value.
The string value is considered to be significant for cache key
generation.
"""
dp_boolean = "B"
"""Visit a boolean value.
The boolean value is considered to be significant for cache key
generation.
"""
dp_operator = "O"
"""Visit an operator.
The operator is a function from the :mod:`sqlalchemy.sql.operators`
module.
The operator value is considered to be significant for cache key
generation.
"""
dp_type = "T"
"""Visit a :class:`.TypeEngine` object
The type object is considered to be significant for cache key
generation.
"""
dp_plain_dict = "PD"
"""Visit a dictionary with string keys.
The keys of the dictionary should be strings, the values should
be immutable and hashable. The dictionary is considered to be
significant for cache key generation.
"""
dp_dialect_options = "DO"
"""Visit a dialect options structure."""
dp_string_clauseelement_dict = "CD"
"""Visit a dictionary of string keys to :class:`_expression.ClauseElement`
objects.
"""
dp_string_multi_dict = "MD"
"""Visit a dictionary of string keys to values which may either be
plain immutable/hashable or :class:`.HasCacheKey` objects.
"""
dp_annotations_key = "AK"
"""Visit the _annotations_cache_key element.
This is a dictionary of additional information about a ClauseElement
that modifies its role. It should be included when comparing or caching
objects, however generating this key is relatively expensive. Visitors
should check the "_annotations" dict for non-None first before creating
this key.
"""
dp_plain_obj = "PO"
"""Visit a plain python object.
The value should be immutable and hashable, such as an integer.
The value is considered to be significant for cache key generation.
"""
dp_named_ddl_element = "DD"
"""Visit a simple named DDL element.
The current object used by this method is the :class:`.Sequence`.
The object is only considered to be important for cache key generation
as far as its name, but not any other aspects of it.
"""
dp_prefix_sequence = "PS"
"""Visit the sequence represented by :class:`_expression.HasPrefixes`
or :class:`_expression.HasSuffixes`.
"""
dp_table_hint_list = "TH"
"""Visit the ``_hints`` collection of a :class:`_expression.Select`
object.
"""
dp_setup_join_tuple = "SJ"
dp_memoized_select_entities = "ME"
dp_statement_hint_list = "SH"
"""Visit the ``_statement_hints`` collection of a
:class:`_expression.Select`
object.
"""
dp_unknown_structure = "UK"
"""Visit an unknown structure.
"""
dp_dml_ordered_values = "DML_OV"
"""Visit the values() ordered tuple list of an
:class:`_expression.Update` object."""
dp_dml_values = "DML_V"
"""Visit the values() dictionary of a :class:`.ValuesBase`
(e.g. Insert or Update) object.
"""
dp_dml_multi_values = "DML_MV"
"""Visit the values() multi-valued list of dictionaries of an
:class:`_expression.Insert` object.
"""
dp_propagate_attrs = "PA"
"""Visit the propagate attrs dict. This hardcodes to the particular
elements we care about right now."""
"""Symbols that follow are additional symbols that are useful in
caching applications.
Traversals for :class:`_expression.ClauseElement` objects only need to use
those symbols present in :class:`.InternalTraversal`. However, for
additional caching use cases within the ORM, symbols dealing with the
:class:`.HasCacheKey` class are added here.
"""
dp_ignore = "IG"
"""Specify an object that should be ignored entirely.
This currently applies function call argument caching where some
arguments should not be considered to be part of a cache key.
"""
dp_inspectable = "IS"
"""Visit an inspectable object where the return value is a
:class:`.HasCacheKey` object."""
dp_multi = "M"
"""Visit an object that may be a :class:`.HasCacheKey` or may be a
plain hashable object."""
dp_multi_list = "MT"
"""Visit a tuple containing elements that may be :class:`.HasCacheKey` or
may be a plain hashable object."""
dp_has_cache_key_tuples = "HT"
"""Visit a list of tuples which contain :class:`.HasCacheKey`
objects.
"""
dp_inspectable_list = "IL"
"""Visit a list of inspectable objects which upon inspection are
HasCacheKey objects."""
_TraverseInternalsType = List[Tuple[str, InternalTraversal]]
"""a structure that defines how a HasTraverseInternals should be
traversed.
This structure consists of a list of (attributename, internaltraversal)
tuples, where the "attributename" refers to the name of an attribute on an
instance of the HasTraverseInternals object, and "internaltraversal" refers
to an :class:`.InternalTraversal` enumeration symbol defining what kind
of data this attribute stores, which indicates to the traverser how it should
be handled.
"""
class HasTraverseInternals:
"""base for classes that have a "traverse internals" element,
which defines all kinds of ways of traversing the elements of an object.
Compared to :class:`.Visitable`, which relies upon an external visitor to
define how the object is traversed (i.e. the :class:`.SQLCompiler`), the
:class:`.HasTraverseInternals` interface allows classes to define their own
traversal, that is, what attributes are accessed and in what order.
"""
__slots__ = ()
_traverse_internals: _TraverseInternalsType
_is_immutable: bool = False
@util.preload_module("sqlalchemy.sql.traversals")
def get_children(
self, *, omit_attrs: Tuple[str, ...] = (), **kw: Any
) -> Iterable[HasTraverseInternals]:
r"""Return immediate child :class:`.visitors.HasTraverseInternals`
elements of this :class:`.visitors.HasTraverseInternals`.
This is used for visit traversal.
\**kw may contain flags that change the collection that is
returned, for example to return a subset of items in order to
cut down on larger traversals, or to return child items from a
different context (such as schema-level collections instead of
clause-level).
"""
traversals = util.preloaded.sql_traversals
try:
traverse_internals = self._traverse_internals
except AttributeError:
# user-defined classes may not have a _traverse_internals
return []
dispatch = traversals._get_children.run_generated_dispatch
return itertools.chain.from_iterable(
meth(obj, **kw)
for attrname, obj, meth in dispatch(
self, traverse_internals, "_generated_get_children_traversal"
)
if attrname not in omit_attrs and obj is not None
)
class _InternalTraversalDispatchType(Protocol):
def __call__(s, self: object, visitor: HasTraversalDispatch) -> Any:
...
class HasTraversalDispatch:
r"""Define infrastructure for classes that perform internal traversals
.. versionadded:: 2.0
"""
__slots__ = ()
_dispatch_lookup: ClassVar[Dict[Union[InternalTraversal, str], str]] = {}
def dispatch(self, visit_symbol: InternalTraversal) -> Callable[..., Any]:
"""Given a method from :class:`.HasTraversalDispatch`, return the
corresponding method on a subclass.
"""
name = _dispatch_lookup[visit_symbol]
return getattr(self, name, None) # type: ignore
def run_generated_dispatch(
self,
target: object,
internal_dispatch: _TraverseInternalsType,
generate_dispatcher_name: str,
) -> Any:
dispatcher: _InternalTraversalDispatchType
try:
dispatcher = target.__class__.__dict__[generate_dispatcher_name]
except KeyError:
# traversals.py -> _preconfigure_traversals()
# may be used to run these ahead of time, but
# is not enabled right now.
# this block will generate any remaining dispatchers.
dispatcher = self.generate_dispatch(
target.__class__, internal_dispatch, generate_dispatcher_name
)
return dispatcher(target, self)
def generate_dispatch(
self,
target_cls: Type[object],
internal_dispatch: _TraverseInternalsType,
generate_dispatcher_name: str,
) -> _InternalTraversalDispatchType:
dispatcher = self._generate_dispatcher(
internal_dispatch, generate_dispatcher_name
)
# assert isinstance(target_cls, type)
setattr(target_cls, generate_dispatcher_name, dispatcher)
return dispatcher
def _generate_dispatcher(
self, internal_dispatch: _TraverseInternalsType, method_name: str
) -> _InternalTraversalDispatchType:
names = []
for attrname, visit_sym in internal_dispatch:
meth = self.dispatch(visit_sym)
if meth:
visit_name = _dispatch_lookup[visit_sym]
names.append((attrname, visit_name))
code = (
(" return [\n")
+ (
", \n".join(
" (%r, self.%s, visitor.%s)"
% (attrname, attrname, visit_name)
for attrname, visit_name in names
)
)
+ ("\n ]\n")
)
meth_text = ("def %s(self, visitor):\n" % method_name) + code + "\n"
return cast(
_InternalTraversalDispatchType,
langhelpers._exec_code_in_env(meth_text, {}, method_name),
)
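# For orientation (hedged sketch, not in the original source): given an
# internal_dispatch list such as [("element", InternalTraversal.dp_plain_obj)],
# the source generated by _generate_dispatcher above looks roughly like:
#
#   def _generated_get_children_traversal(self, visitor):
#       return [
#           ('element', self.element, visitor.visit_plain_obj)
#       ]
#
# where visit_plain_obj is resolved through _dispatch_lookup.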
ExtendedInternalTraversal = InternalTraversal
def _generate_traversal_dispatch() -> None:
lookup = _dispatch_lookup
for sym in InternalTraversal:
key = sym.name
if key.startswith("dp_"):
visit_key = key.replace("dp_", "visit_")
sym_name = sym.value
assert sym_name not in lookup, sym_name
lookup[sym] = lookup[sym_name] = visit_key
_dispatch_lookup = HasTraversalDispatch._dispatch_lookup
_generate_traversal_dispatch()
class ExternallyTraversible(HasTraverseInternals, Visitable):
__slots__ = ()
_annotations: Collection[Any] = ()
if typing.TYPE_CHECKING:
def get_children(
self, *, omit_attrs: Tuple[str, ...] = (), **kw: Any
) -> Iterable[ExternallyTraversible]:
...
def _clone(self: Self, **kw: Any) -> Self:
"""clone this element"""
raise NotImplementedError()
def _copy_internals(
self, *, omit_attrs: Tuple[str, ...] = (), **kw: Any
) -> None:
"""Reassign internal elements to be clones of themselves.
Called during a copy-and-traverse operation on newly
shallow-copied elements to create a deep copy.
The given clone function should be used, which may be applying
additional transformations to the element (i.e. replacement
traversal, cloned traversal, annotations).
"""
raise NotImplementedError()
_ET = TypeVar("_ET", bound=ExternallyTraversible)
_TraverseCallableType = Callable[[_ET], None]
class _CloneCallableType(Protocol):
def __call__(self, element: _ET, **kw: Any) -> _ET:
...
class _TraverseTransformCallableType(Protocol):
def __call__(
self, element: ExternallyTraversible, **kw: Any
) -> Optional[ExternallyTraversible]:
...
_ExtT = TypeVar("_ExtT", bound="ExternalTraversal")
class ExternalTraversal:
"""Base class for visitor objects which can traverse externally using
the :func:`.visitors.traverse` function.
Direct usage of the :func:`.visitors.traverse` function is usually
preferred.
"""
__traverse_options__: Dict[str, Any] = {}
_next: Optional[ExternalTraversal]
def traverse_single(self, obj: Visitable, **kw: Any) -> Any:
for v in self.visitor_iterator:
meth = getattr(v, "visit_%s" % obj.__visit_name__, None)
if meth:
return meth(obj, **kw)
def iterate(
self, obj: Optional[ExternallyTraversible]
) -> Iterator[ExternallyTraversible]:
"""Traverse the given expression structure, returning an iterator
of all elements.
"""
return iterate(obj, self.__traverse_options__)
@overload
def traverse(self, obj: Literal[None]) -> None:
...
@overload
def traverse(self, obj: ExternallyTraversible) -> ExternallyTraversible:
...
def traverse(
self, obj: Optional[ExternallyTraversible]
) -> Optional[ExternallyTraversible]:
"""Traverse and visit the given expression structure."""
return traverse(obj, self.__traverse_options__, self._visitor_dict)
@util.memoized_property
def _visitor_dict(self) -> Dict[str, _TraverseCallableType[Any]]:
visitors = {}
for name in dir(self):
if name.startswith("visit_"):
visitors[name[6:]] = getattr(self, name)
return visitors
@property
def visitor_iterator(self) -> Iterator[ExternalTraversal]:
"""Iterate through this visitor and each 'chained' visitor."""
v: Optional[ExternalTraversal] = self
while v:
yield v
v = getattr(v, "_next", None)
def chain(self: _ExtT, visitor: ExternalTraversal) -> _ExtT:
"""'Chain' an additional ExternalTraversal onto this ExternalTraversal
The chained visitor will receive all visit events after this one.
"""
tail = list(self.visitor_iterator)[-1]
tail._next = visitor
return self
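# Usage sketch (illustrative, not in the original source): two visitors can be
# chained so that both receive visit events during a single traversal; the
# subclass names and the statement are assumptions:
#
#   counter = CountingVisitor()      # hypothetical ExternalTraversal subclasses
#   logger = LoggingVisitor()
#   counter.chain(logger).traverse(stmt)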
class CloningExternalTraversal(ExternalTraversal):
"""Base class for visitor objects which can traverse using
the :func:`.visitors.cloned_traverse` function.
Direct usage of the :func:`.visitors.cloned_traverse` function is usually
preferred.
"""
def copy_and_process(
self, list_: List[ExternallyTraversible]
) -> List[ExternallyTraversible]:
"""Apply cloned traversal to the given list of elements, and return
the new list.
"""
return [self.traverse(x) for x in list_]
@overload
def traverse(self, obj: Literal[None]) -> None:
...
@overload
def traverse(self, obj: ExternallyTraversible) -> ExternallyTraversible:
...
def traverse(
self, obj: Optional[ExternallyTraversible]
) -> Optional[ExternallyTraversible]:
"""Traverse and visit the given expression structure."""
return cloned_traverse(
obj, self.__traverse_options__, self._visitor_dict
)
class ReplacingExternalTraversal(CloningExternalTraversal):
"""Base class for visitor objects which can traverse using
the :func:`.visitors.replacement_traverse` function.
Direct usage of the :func:`.visitors.replacement_traverse` function is
usually preferred.
"""
def replace(
self, elem: ExternallyTraversible
) -> Optional[ExternallyTraversible]:
"""Receive pre-copied elements during a cloning traversal.
If the method returns a new element, the element is used
instead of creating a simple copy of the element. Traversal
will halt on the newly returned element if it is re-encountered.
"""
return None
@overload
def traverse(self, obj: Literal[None]) -> None:
...
@overload
def traverse(self, obj: ExternallyTraversible) -> ExternallyTraversible:
...
def traverse(
self, obj: Optional[ExternallyTraversible]
) -> Optional[ExternallyTraversible]:
"""Traverse and visit the given expression structure."""
def replace(
element: ExternallyTraversible,
**kw: Any,
) -> Optional[ExternallyTraversible]:
for v in self.visitor_iterator:
e = cast(ReplacingExternalTraversal, v).replace(element)
if e is not None:
return e
return None
return replacement_traverse(obj, self.__traverse_options__, replace)
# backwards compatibility
Traversible = Visitable
ClauseVisitor = ExternalTraversal
CloningVisitor = CloningExternalTraversal
ReplacingCloningVisitor = ReplacingExternalTraversal
def iterate(
obj: Optional[ExternallyTraversible],
opts: Mapping[str, Any] = util.EMPTY_DICT,
) -> Iterator[ExternallyTraversible]:
r"""Traverse the given expression structure, returning an iterator.
Traversal is configured to be breadth-first.
The central API feature used by the :func:`.visitors.iterate`
function is the
:meth:`_expression.ClauseElement.get_children` method of
:class:`_expression.ClauseElement` objects. This method should return all
the :class:`_expression.ClauseElement` objects which are associated with a
particular :class:`_expression.ClauseElement` object. For example, a
:class:`.Case` structure will refer to a series of
:class:`_expression.ColumnElement` objects within its "whens" and "else\_"
member variables.
:param obj: :class:`_expression.ClauseElement` structure to be traversed
:param opts: dictionary of iteration options. This dictionary is usually
empty in modern usage.
"""
if obj is None:
return
yield obj
children = obj.get_children(**opts)
if not children:
return
stack = deque([children])
while stack:
t_iterator = stack.popleft()
for t in t_iterator:
yield t
stack.append(t.get_children(**opts))
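# Usage sketch (illustrative, not in the original source): breadth-first
# iteration over a statement; "stmt" is an assumed select() construct:
#
#   for element in iterate(stmt, {}):
#       print(type(element).__name__)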
@overload
def traverse_using(
iterator: Iterable[ExternallyTraversible],
obj: Literal[None],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> None:
...
@overload
def traverse_using(
iterator: Iterable[ExternallyTraversible],
obj: ExternallyTraversible,
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> ExternallyTraversible:
...
def traverse_using(
iterator: Iterable[ExternallyTraversible],
obj: Optional[ExternallyTraversible],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> Optional[ExternallyTraversible]:
"""Visit the given expression structure using the given iterator of
objects.
:func:`.visitors.traverse_using` is usually called internally as the result
of the :func:`.visitors.traverse` function.
:param iterator: an iterable or sequence which will yield
:class:`_expression.ClauseElement`
structures; the iterator is assumed to be the
product of the :func:`.visitors.iterate` function.
:param obj: the :class:`_expression.ClauseElement`
that was used as the target of the
:func:`.iterate` function.
:param visitors: dictionary of visit functions. See :func:`.traverse`
for details on this dictionary.
.. seealso::
:func:`.traverse`
"""
for target in iterator:
meth = visitors.get(target.__visit_name__, None)
if meth:
meth(target)
return obj
@overload
def traverse(
obj: Literal[None],
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> None:
...
@overload
def traverse(
obj: ExternallyTraversible,
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> ExternallyTraversible:
...
def traverse(
obj: Optional[ExternallyTraversible],
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> Optional[ExternallyTraversible]:
"""Traverse and visit the given expression structure using the default
iterator.
e.g.::
from sqlalchemy.sql import visitors
stmt = select(some_table).where(some_table.c.foo == 'bar')
def visit_bindparam(bind_param):
print("found bound value: %s" % bind_param.value)
visitors.traverse(stmt, {}, {"bindparam": visit_bindparam})
The iteration of objects uses the :func:`.visitors.iterate` function,
which does a breadth-first traversal using a stack.
:param obj: :class:`_expression.ClauseElement` structure to be traversed
:param opts: dictionary of iteration options. This dictionary is usually
empty in modern usage.
:param visitors: dictionary of visit functions. The dictionary should
have strings as keys, each of which would correspond to the
``__visit_name__`` of a particular kind of SQL expression object, and
callable functions as values, each of which represents a visitor function
for that kind of object.
"""
return traverse_using(iterate(obj, opts), obj, visitors)
@overload
def cloned_traverse(
obj: Literal[None],
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> None:
...
# a bit of controversy here, as the clone of the lead element
# *could* in theory replace with an entirely different kind of element.
# however this is really not how cloned_traverse is ever used internally
# at least.
@overload
def cloned_traverse(
obj: _ET,
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> _ET:
...
def cloned_traverse(
obj: Optional[ExternallyTraversible],
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> Optional[ExternallyTraversible]:
"""Clone the given expression structure, allowing modifications by
visitors for mutable objects.
Traversal usage is the same as that of :func:`.visitors.traverse`.
The visitor functions present in the ``visitors`` dictionary may also
modify the internals of the given structure as the traversal proceeds.
The :func:`.cloned_traverse` function does **not** provide objects that are
part of the :class:`.Immutable` interface to the visit methods (this
primarily includes :class:`.ColumnClause`, :class:`.Column`,
:class:`.TableClause` and :class:`.Table` objects). As this traversal is
only intended to allow in-place mutation of objects, :class:`.Immutable`
objects are skipped. The :meth:`.Immutable._clone` method is still called
on each object to allow for objects to replace themselves with a different
object based on a clone of their sub-internals (e.g. a
:class:`.ColumnClause` that clones its subquery to return a new
:class:`.ColumnClause`).
.. versionchanged:: 2.0 The :func:`.cloned_traverse` function omits
objects that are part of the :class:`.Immutable` interface.
The central API feature used by the :func:`.visitors.cloned_traverse`
and :func:`.visitors.replacement_traverse` functions, in addition to the
:meth:`_expression.ClauseElement.get_children`
function that is used to achieve
the iteration, is the :meth:`_expression.ClauseElement._copy_internals`
method.
For a :class:`_expression.ClauseElement`
structure to support cloning and replacement
traversals correctly, it needs to be able to pass a cloning function into
its internal members in order to make copies of them.
.. seealso::
:func:`.visitors.traverse`
:func:`.visitors.replacement_traverse`
"""
cloned: Dict[int, ExternallyTraversible] = {}
stop_on = set(opts.get("stop_on", []))
def deferred_copy_internals(
obj: ExternallyTraversible,
) -> ExternallyTraversible:
return cloned_traverse(obj, opts, visitors)
def clone(elem: ExternallyTraversible, **kw: Any) -> ExternallyTraversible:
if elem in stop_on:
return elem
else:
if id(elem) not in cloned:
if "replace" in kw:
newelem = cast(
Optional[ExternallyTraversible], kw["replace"](elem)
)
if newelem is not None:
cloned[id(elem)] = newelem
return newelem
# the _clone method for immutable normally returns "self".
# however, the method is still allowed to return a
# different object altogether; ColumnClause._clone() will
# based on options clone the subquery to which it is associated
# and return the new corresponding column.
cloned[id(elem)] = newelem = elem._clone(clone=clone, **kw)
newelem._copy_internals(clone=clone, **kw)
# however, visit methods which are tasked with in-place
# mutation of the object should not get access to the immutable
# object.
if not elem._is_immutable:
meth = visitors.get(newelem.__visit_name__, None)
if meth:
meth(newelem)
return cloned[id(elem)]
if obj is not None:
obj = clone(
obj, deferred_copy_internals=deferred_copy_internals, **opts
)
clone = None # type: ignore[assignment] # remove gc cycles
return obj
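# Usage sketch (illustrative, not in the original source): mutate bound
# parameter values on a cloned copy while leaving the original statement
# untouched; "stmt" is an assumed select() construct:
#
#   def visit_bindparam(bind_param):
#       bind_param.value = "new value"
#
#   new_stmt = cloned_traverse(stmt, {}, {"bindparam": visit_bindparam})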
@overload
def replacement_traverse(
obj: Literal[None],
opts: Mapping[str, Any],
replace: _TraverseTransformCallableType,
) -> None:
...
@overload
def replacement_traverse(
obj: ExternallyTraversible,
opts: Mapping[str, Any],
replace: _TraverseTransformCallableType,
) -> ExternallyTraversible:
...
def replacement_traverse(
obj: Optional[ExternallyTraversible],
opts: Mapping[str, Any],
replace: _TraverseTransformCallableType,
) -> Optional[ExternallyTraversible]:
"""Clone the given expression structure, allowing element
replacement by a given replacement function.
This function is very similar to the :func:`.visitors.cloned_traverse`
function, except instead of being passed a dictionary of visitors, all
elements are unconditionally passed into the given replace function.
The replace function then has the option to return an entirely new object
which will replace the one given. If it returns ``None``, then the object
is kept in place.
The difference in usage between :func:`.visitors.cloned_traverse` and
:func:`.visitors.replacement_traverse` is that in the former case, an
already-cloned object is passed to the visitor function, and the visitor
function can then manipulate the internal state of the object.
In the case of the latter, the visitor function should only return an
entirely different object, or do nothing.
The use case for :func:`.visitors.replacement_traverse` is that of
replacing a FROM clause inside of a SQL structure with a different one,
as is a common use case within the ORM.
"""
cloned = {}
stop_on = {id(x) for x in opts.get("stop_on", [])}
def deferred_copy_internals(
obj: ExternallyTraversible,
) -> ExternallyTraversible:
return replacement_traverse(obj, opts, replace)
def clone(elem: ExternallyTraversible, **kw: Any) -> ExternallyTraversible:
if (
id(elem) in stop_on
or "no_replacement_traverse" in elem._annotations
):
return elem
else:
newelem = replace(elem)
if newelem is not None:
stop_on.add(id(newelem))
return newelem
else:
# base "already seen" on id(), not hash, so that we don't
# replace an Annotated element with its non-annotated one, and
# vice versa
id_elem = id(elem)
if id_elem not in cloned:
if "replace" in kw:
newelem = kw["replace"](elem)
if newelem is not None:
cloned[id_elem] = newelem
return newelem
cloned[id_elem] = newelem = elem._clone(**kw)
newelem._copy_internals(clone=clone, **kw)
return cloned[id_elem]
if obj is not None:
obj = clone(
obj, deferred_copy_internals=deferred_copy_internals, **opts
)
clone = None # type: ignore[assignment] # remove gc cycles
return obj
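# Usage sketch (illustrative, not in the original source): swap one FROM
# element for another wherever it appears, the use case described in the
# docstring above; "old_table", "new_table" and "stmt" are assumptions:
#
#   def replace(element, **kw):
#       if element is old_table:
#           return new_table
#       return None
#
#   new_stmt = replacement_traverse(stmt, {}, replace)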
``` |
{
"source": "jmg/glucose_backend",
"score": 3
} |
#### File: backend/auth/views.py
```python
from flask import Blueprint, request, make_response, jsonify
from flask.views import MethodView
from project.backend import bcrypt, db
from project.backend.models import User, BlacklistToken
from functools import wraps
auth_blueprint = Blueprint('auth', __name__)
def login_required(function):
@wraps(function)
def wrap(*args, **kwargs):
auth_header = request.headers.get('Authorization')
if auth_header:
try:
auth_token = auth_header.split(" ")[1]
except IndexError:
response = {
'status': 'fail',
'message': 'Bearer token malformed.'
}
return make_response(jsonify(response)), 401
else:
auth_token = ''
user = User.get_user_from_token(auth_token)
if not user:
response = {
'status': 'fail',
'message': 'Non existent token.'
}
return make_response(jsonify(response)), 401
request.user = user
return function(*args, **kwargs)
return wrap
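# Usage sketch (illustrative, not part of the original file): any MethodView
# handler can be protected with the decorator above; the resource name and the
# "email" field are assumptions:
#
#   class ProfileAPI(MethodView):
#       @login_required
#       def get(self):
#           return make_response(jsonify({'email': request.user.email})), 200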
class RegisterAPI(MethodView):
"""
User Registration Resource
"""
def post(self):
# get the post data
post_data = request.get_json()
# check if user already exists
user = User.query.filter_by(email=post_data.get('email')).first()
if not user:
try:
user = User(
email=post_data.get('email'),
password=post_data.get('password')
)
# insert the user
db.session.add(user)
db.session.commit()
# generate the auth token
auth_token = user.encode_auth_token(user.id)
response = {
'status': 'success',
'message': 'Successfully registered.',
'auth_token': auth_token.decode()
}
return make_response(jsonify(response)), 201
except Exception as e:
response = {
'status': 'fail',
'message': 'Some error occurred. Please try again.'
}
return make_response(jsonify(response)), 401
else:
response = {
'status': 'fail',
'message': 'User already exists. Please Log in.',
}
return make_response(jsonify(response)), 202
class LoginAPI(MethodView):
"""
User Login Resource
"""
def post(self):
# get the post data
post_data = request.get_json()
try:
# fetch the user data
user = User.query.filter_by(
email=post_data.get('email')
).first()
if user and bcrypt.check_password_hash(
user.password, post_data.get('password')
):
auth_token = user.encode_auth_token(user.id)
if auth_token:
response = {
'status': 'success',
'message': 'Successfully logged in.',
'auth_token': auth_token.decode()
}
return make_response(jsonify(response)), 200
else:
response = {
'status': 'fail',
'message': 'User does not exist.'
}
return make_response(jsonify(response)), 404
except Exception as e:
print(e)
response = {
'status': 'fail',
'message': 'Try again'
}
return make_response(jsonify(response)), 500
class LogoutAPI(MethodView):
"""
Logout Resource
"""
def post(self):
# get auth token
auth_header = request.headers.get('Authorization')
if auth_header:
auth_token = auth_header.split(" ")[1]
else:
auth_token = ''
if auth_token:
resp = User.decode_auth_token(auth_token)
if not isinstance(resp, str):
# mark the token as blacklisted
blacklist_token = BlacklistToken(token=auth_token)
try:
# insert the token
db.session.add(blacklist_token)
db.session.commit()
response = {
'status': 'success',
'message': 'Successfully logged out.'
}
return make_response(jsonify(response)), 200
except Exception as e:
response = {
'status': 'fail',
'message': str(e)
}
return make_response(jsonify(response)), 500
else:
response = {
'status': 'fail',
'message': resp
}
return make_response(jsonify(response)), 401
else:
response = {
'status': 'fail',
'message': 'Provide a valid auth token.'
}
return make_response(jsonify(response)), 403
# define the API resources
registration_view = RegisterAPI.as_view('register_api')
login_view = LoginAPI.as_view('login_api')
logout_view = LogoutAPI.as_view('logout_api')
# add Rules for API Endpoints
auth_blueprint.add_url_rule(
'/auth/register',
view_func=registration_view,
methods=['POST']
)
auth_blueprint.add_url_rule(
'/auth/login',
view_func=login_view,
methods=['POST']
)
auth_blueprint.add_url_rule(
'/auth/logout',
view_func=logout_view,
methods=['POST']
)
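# Request sketch (illustrative, not part of the original file): all three
# endpoints accept JSON bodies via POST, e.g.
#
#   POST /auth/register  {"email": "user@example.com", "password": "secret"}
#   POST /auth/login     {"email": "user@example.com", "password": "secret"}
#   POST /auth/logout    with header "Authorization: Bearer <auth_token>"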
```
#### File: backend/glucose/views.py
```python
from flask import Blueprint, request, make_response, jsonify
from flask.views import MethodView
from project.backend.models import GlucoseMeasurement
from project.backend.auth.views import login_required
from project.backend.helpers import response_ok, response_fail
from project.backend import config
import sqlalchemy
import json
import feedparser
import random
from datetime import datetime
from dateutil import parser
glucose_blueprint = Blueprint('glucose', __name__)
class TipsAPI(MethodView):
@login_required
def get(self):
entries = feedparser.parse(config.tips_feed_url)
entry = random.choice(entries["entries"])
return response_ok({"tip": entry.title})
class AllTipsAPI(MethodView):
@login_required
def get(self):
entries = feedparser.parse(config.tips_feed_url)
all_entries = []
for entry in entries["entries"]:
all_entries.append({"title": entry["title"], "link": entry["link"], "content": entry["content"][0]["value"]})
return response_ok({"tips": all_entries})
class MeasureAPI(MethodView):
@login_required
def post(self):
data = json.loads(request.data)
value = data["value"]
if "date" in data:
try:
date = parser.parse(data["date"])
except (ValueError, OverflowError):
return response_fail({"error": "Please enter a valid date"})
else:
date = datetime.utcnow()
glucose = GlucoseMeasurement(date=date, value=value, user=request.user).save()
if value > 140 or value < 80:
status = "not ok"
else:
status = "ok"
return response_ok({"glucose_status": status, "glucose": {"date": glucose.date, "value": float(glucose.value) }})
class AverageAPI(MethodView):
@login_required
def post(self):
data = json.loads(request.data)
date = parser.parse(data["date"])
measurements = GlucoseMeasurement.q().filter(
sqlalchemy.func.extract('year', GlucoseMeasurement.date)==date.year,
sqlalchemy.func.extract('month', GlucoseMeasurement.date)==date.month,
sqlalchemy.func.extract('day', GlucoseMeasurement.date)==date.day,
).all()
avg = 0
if measurements:
for measurement in measurements:
avg += measurement.value
avg = avg / len(measurements)
return response_ok({"avg": float(avg)})
tips_view = TipsAPI.as_view('tips_api')
all_tips_api = AllTipsAPI.as_view('all_tips_api')
measure_view = MeasureAPI.as_view('measure_api')
average_view = AverageAPI.as_view('average_api')
glucose_blueprint.add_url_rule('/glucose/tips', view_func=tips_view, methods=['GET'])
glucose_blueprint.add_url_rule('/glucose/tips/all', view_func=all_tips_api, methods=['GET'])
glucose_blueprint.add_url_rule('/glucose/measure', view_func=measure_view, methods=['POST'])
glucose_blueprint.add_url_rule('/glucose/average', view_func=average_view, methods=['POST'])
``` |
{
"source": "jmgiaever/home-assistant-snap",
"score": 2
} |
#### File: components/updater/__init__.py
```python
from __future__ import annotations
import asyncio, logging, async_timeout, os, voluptuous as vol
from awesomeversion import AwesomeVersion
from datetime import timedelta
from homeassistant.const import __version__ as current_version
from homeassistant.helpers import discovery, update_coordinator
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
_log_wmsg = """
NOTE! Using a replacement (custom) 'updater' component for the snap package.
Do NOT report bugs of any kind related to 'updater' to the Home Assistant core project.
Report any issues at https://github.com/home-assistant-snap/home-assistant-snap/issues"""
ATTR_RELEASE_NOTES = "release_notes"
ATTR_UPDATE_NOTES = "update_notes"
ATTR_NEWEST_VERSION = "newest_version"
# Keeping for consistency (we're overriding)
CONF_REPORTING = "reporting"
CONF_COMPONENT_REPORTING = "include_used_components"
DOMAIN = "updater"
UPDATER_URL = "https://api.snapcraft.io/v2/snaps/info/home-assistant-snap?architecture=%s&fields=channel-map,revision,version"
RESPONSE_SCHEMA = vol.Schema(
{
vol.Required("channel-map"): cv.ensure_list,
vol.Required("default-track"): cv.string,
},
extra=vol.REMOVE_EXTRA,
)
class Channel:
def __init__(self, track: Track, channel: dict, revision: int, version: str):
self.__arch = channel['architecture']
self.__risk = 0
self.__track = track
self.__risks = {'stable': 4, 'candidate': 3, 'beta': 2, 'edge': 1}
if channel['risk'] in self.__risks:
self.__risk = self.__risks[channel['risk']]
self.__revision = revision
self.__version = AwesomeVersion(version)
def __str__(self) -> str:
return f"{str(self.__track)}/{self.get_risk()}"
def __repr__(self) -> str:
return f"{self.__version}, revision: {self.__revision}, channel: {str(self)}"
def get_track(self) -> Track:
return self.__track
def get_risk(self, as_str: bool = True) -> str|int:
if not as_str:
return self.__risk
for v,k in self.__risks.items():
if k == self.__risk:
return v
return self.__risk
def get_revision(self) -> int:
return self.__revision
def get_version(self) -> AwesomeVersion:
return self.__version
def __gt__(self, other: Channel) -> bool:
if self.__track == 'latest':
return False
elif other.get_track() == 'latest':
return False
return self.__revision > other.get_revision() and self.__risk >= other.get_risk(False)
class Track:
def __init__(self, track: str):
self.__track = AwesomeVersion(track)
self.__channels = []
def get_track(self) -> str:
return self.__track
def __eq__(self, other: Track|str) -> bool:
if isinstance(other, Track):
return self.get_track() == other.get_track()
return self.get_track() == other
def add_channel(self, channel) -> Track:
self.__channels.append(Channel(self, channel['channel'], channel['revision'], channel['version']))
self.__channels.sort(key=lambda x: x.get_risk(False))
return self
def get_channels(self) -> list:
return self.__channels
def channel_with_revision(self, revision: int) -> Channel|None:
for channel in self.__channels:
if channel.get_revision() == revision:
return channel
return None
def channel_with_higher_revision(self, channel: Channel) -> Channel|None:
newest = channel
for channel in self.__channels:
if channel > newest:
newest = channel
return newest
def get_latest(self) -> Channel|None:
if len(self.__channels) == 0:
return None
return self.__channels[len(self.__channels)-1]
def __repr__(self) -> str:
risks = []
for channel in self.__channels:
risks.append(f"{channel.get_risk()}/{channel.get_revision()}")
return f"{self.__track}({', '.join(risks)})"
def __str__(self) -> str:
return str(self.__track)
class Tracks:
def __init__(self, channel_map: list) -> None:
self.__tracks = []
for channel in channel_map:
track = self.get_track(channel['channel']['track'])
if track is not None:
track.add_channel(channel)
else:
track = Track(channel['channel']['track'])
track.add_channel(channel)
self.__tracks.append(track)
self.__tracks.sort(key=lambda x: x.get_track())
def get_latest(self) -> Track|None:
self.__tracks.reverse()
latest = None
for track in self.__tracks:
if len(track.get_channels()) != 0 and track.get_latest().get_risk() == 'latest':
latest = track.get_latest()
break
self.__tracks.reverse()
return latest
def get_track(self, track: str) -> Track|None:
for t in self.__tracks:
if t == track:
return t
return None
def find_for_revision(self, revision: int) -> Channel|None:
for track in self.__tracks:
channel = track.channel_with_revision(revision)
if channel is not None:
return channel
return None
def channel_with_higher_revision(self, channel: Channel) -> Channel|None:
for track in self.__tracks:
for chan in track.get_channels():
if chan > channel:
return chan
return None
def track_with_lower_revision(self, revision: int) -> Channel|None:
closest = None
for track in self.__tracks:
for chan in track.get_channels():
if chan.get_revision() < revision:
if closest is not None and closest < chan:
closest = chan
else:
closest = chan
return closest
def __str__(self) -> str:
tracks = []
for track in self.__tracks:
tracks.append(str(track))
return ", ".join(tracks)
def __repr__(self) -> str:
return self.__str__()
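# Illustrative sketch (not part of the original file): Tracks is built from the
# raw "channel-map" list returned by the Snap Store API; the values shown are
# assumptions:
#
#   tracks = Tracks([
#       {"channel": {"architecture": "amd64", "track": "2021.12", "risk": "stable"},
#        "revision": 327, "version": "2021.12.7"},
#   ])
#   tracks.get_track("2021.12")        # -> Track with one stable channel
#   tracks.find_for_revision(327)      # -> the matching Channel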
class Updater:
""" Updater class for data exchange."""
def __init__(self, update_available: bool, default: Channel, current: Channel|None, newer: Channel|None, update_notes: str) -> None:
self.update_available = update_available
self.newest_version = str(newer.get_version()) if newer is not None else str(default.get_version())
self.update_notes = update_notes
if isinstance(newer, Channel) and default > newer:
self.update_notes += f"\n\nLatest channel is: _«{repr(default)}»_."
elif newer is None and isinstance(current, Channel) and default > current:
self.update_notes += f"\n\nLatest channel is: _«{repr(default)}»_. Upgrade with: `snap switch home-assistant-snap --channel={default}`"
elif current is None and newer is None:
self.update_notes += f"\n\nLatest channel is: _«{repr(default)}»_."
if update_available:
_LOGGER.info("UPDATE AVAILABLE: %s, newer: %s, current: %s, default: %s, notes: %s", update_available, newer, current, default, self.update_notes)
self.release_notes = "https://www.home-assistant.io/blog/categories/core/"
async def async_setup(hass, config):
conf = config.get(DOMAIN, {})
_LOGGER.warning(_log_wmsg)
# Keeping for consistency (we're overriding)
for option in (CONF_COMPONENT_REPORTING, CONF_REPORTING):
if option in conf:
_LOGGER.warning(
"Analytics reporting with the option '%s' "
"is deprecated and you should remove that from your configuration. "
"The analytics part of this integration has moved to the new 'analytics' integration",
option,
)
async def check_new_version() -> Updater:
_LOGGER.warning(_log_wmsg)
snap_rev = os.getenv('SNAP_REVISION')
tracks, default_track = await get_versions(hass)
if snap_rev is None:
if AwesomeVersion(current_version).dev:
snap_rev = 327
_LOGGER.warning(f"Development, using SNAP_REVISION: {snap_rev}")
else:
raise update_coordinator.UpdateFailed(Exception("Missing SNAP_REVISION environment variable."))
if f"{snap_rev[0] if type(snap_rev) is str else 'y'}" == 'x':
c_v = AwesomeVersion(current_version)
track = tracks.get_track(f"{c_v.section(0)}.{c_v.section(1)}")
if track is not None and len(track.get_channels()) != 0:
xsnap_rev = track.get_latest().get_revision()
else:
xsnap_rev = default_track.get_latest().get_revision()
_LOGGER.warning(f"Locally built ({snap_rev}), using SNAP_REVISION: {xsnap_rev}")
snap_rev = xsnap_rev
snap_rev = int(snap_rev)
current_channel = tracks.find_for_revision(snap_rev)
"""
NOTE: This is only a prediction - a revision of a snap might be in several channels,
and is always in latest. Therefore you can be on latest and receive notifications about new
releases in another channel if they have the same revision available.
"""
if current_channel is not None and current_channel.get_track() == "latest":
_LOGGER.warning(
f"You're on the channel «{current_channel}», please consider switch to «{default_track.get_latest()}». "
f"Switch with: sudo snap switch --channel={default_track.get_latest()}"
f"Staying on {current_channel} will auto-upgrade your Home Assistant instance, which "
f"can cause your Home Assistant instance to stop working as of breaking changes."
)
return Updater(False, default_track.get_latest(), current_channel, None,
f"You're on the channel «{current_channel}», please consider switch to «{default_track.get_latest()}». "
f"Switch with: sudo snap switch --channel={default_track.get_latest()}"
f"Staying on {current_channel} will auto-upgrade your Home Assistant instance, which "
f"can cause your Home Assistant instance to stop working as of breaking changes."
)
if current_channel is not None:
newer_channel = current_channel.get_track().channel_with_higher_revision(current_channel)
if newer_channel is not None and newer_channel > current_channel:
return Updater(True, default_track.get_latest(), current_channel, newer_channel,
f"You're currently on _«{repr(current_channel)}»_ and can upgrade to _«{repr(newer_channel)}»_. "
f"Update with `sudo snap switch home-assistant-snap --channel={newer_channel}`."
)
newer_channel = tracks.channel_with_higher_revision(current_channel)
if newer_channel is not None and newer_channel > current_channel:
return Updater(True, default_track.get_latest(), current_channel, newer_channel,
f"You're currently on _«{repr(current_channel)}»_ and can upgrade to _«{repr(newer_channel)}»_. "
f"Update with `sudo snap switch home-assistant-snap --channel={newer_channel}`."
)
return Updater(False, default_track.get_latest(), None, current_channel, f"You're on _«{repr(current_channel)}»_!")
else:
c_v = AwesomeVersion(current_version)
track = tracks.get_track(f"{c_v.section(0)}.{c_v.section(1)}")
if track is not None and track.get_latest() is not None:
current_channel = track.get_latest()
newer_channel = tracks.channel_with_higher_revision(current_channel)
if newer_channel is not None and newer_channel > current_channel:
return Updater(True, default_track.get_latest(), current_channel, newer_channel,
f"Unknown revision «{snap_rev}», assuming on any channel for {current_channel.get_track()}. The snap package "
f"should automatically update, but you can also upgrade to _«{repr(newer_channel)}»_ with: "
f"`sudo snap switch home-assistant-snap --channel={newer_channel}`."
)
return Updater(True, default_track.get_latest(), current_channel, None,
f"Unknown revision «{snap_rev}», assuming on any channel for track {current_channel.get_track()}. The snap package "
f"should automatically update, but double check that the channel is not closed. You can force the update with: "
f"`sudo snap refresh home-assistant-snap` and find channels with: `sudo info home-assistant-snap`."
)
older_track = tracks.track_with_lower_revision(snap_rev)
if older_track is not None:
newer_channel = tracks.channel_with_higher_revision(older_track)
if newer_channel is not None:
newer_channel = newer_channel.get_track().get_latest()
return Updater(True, default_track.get_latest(), None, newer_channel,
f"No channel found for {c_v.section(0)}.{c_v.section(1)}, it might have been deleted - and you will not receive updates. "
f"A newer channel _«{repr(newer_channel)}»_ is available! "
f"You can switch with: `sudo snap refresh home-assistant-snap --channel={newer_channel}`."
)
return Updater(True, default_track.get_latest(), None, None,
f"No channel found for «{snap_rev}» ({c_v.section(0)}.{c_v.section(1)}). "
f"Please consult `snap info home-assistant-snap` to find a suitable track to upgrade to, "
f"and switch channel with: `snap switch home-assistant-snap --channel=<channel>`."
)
coordinator = hass.data[DOMAIN] = update_coordinator.DataUpdateCoordinator[Updater](
hass,
_LOGGER,
name="Home Assistant Snap update",
update_method=check_new_version,
update_interval=timedelta(days=1)
)
asyncio.create_task(coordinator.async_refresh())
hass.async_create_task(
discovery.async_load_platform(hass, 'binary_sensor', DOMAIN, {}, config)
)
return True
from urllib.parse import urlparse
async def get_versions(hass):
session = async_get_clientsession(hass)
snap_arch = os.getenv('SNAP_ARCH')
if snap_arch is None:
if AwesomeVersion(current_version).dev:
snap_arch = "amd64"
else:
raise update_coordinator.UpdateFailed(Exception("Missing SNAP_ARCH environment variable."))
with async_timeout.timeout(45):
req = await session.get(UPDATER_URL % snap_arch, headers={
'Snap-Device-Series': '16'
})
try:
res = await req.json()
except ValueError as err:
raise update_coordinator.UpdateFailed(
f"Received invalid JSON from {urlparse(UPDATER_URL).netloc}"
) from err
try:
res = RESPONSE_SCHEMA(res)
tracks = Tracks(res['channel-map'])
default_track = res['default-track'] if 'default-track' in res else None
if default_track is None:
default_track = tracks.get_latest()
else:
default_track = tracks.get_track(default_track)
return [tracks, default_track]
except vol.Invalid as err:
raise update_coordinator.UpdateFailed(
f"Got unexepected response: {err}"
) from err
``` |
{
"source": "jmgilman/bdantic",
"score": 3
} |
#### File: bdantic/models/file.py
```python
from __future__ import annotations
import lzma
import pickle
from decimal import Decimal
from typing import Any, Dict, List, Optional, Set, Tuple, Type, TypeVar
from beancount import loader
from beancount.core import data, realization
from beancount.query import query
from pydantic import Extra
from bdantic import models
from bdantic.types import ModelDirective, type_map
from .base import Base, BaseList
from .query import QueryResult
from .realize import Account, RealAccount
T = TypeVar("T", bound="ModelDirective")
class Directives(BaseList, smart_union=True):
"""A model representing a list of directives.
This model wraps the entries response often returned when loading the
content of a beancount file. It holds a list of various valid directive
models.
"""
__root__: List[ModelDirective]
@classmethod
def parse(cls, obj: List[data.Directive]) -> Directives:
"""Parses a list of beancount directives into this model
Args:
obj: The Beancount directives to parse
Returns:
A new instance of this model
"""
dirs = []
dirs = [type_map[type(d)].parse(d) for d in obj] # type: ignore
return Directives(__root__=dirs)
def export(self) -> List[data.Directive]:
"""Exports this model into a list of beancount directives
Returns:
The list of beancount directives
"""
dirs = [d.export() for d in self.__root__]
return dirs
def by_account(self, account: str) -> Directives:
"""Returns a new instance of `Directives` filtered by the given account.
Args:
account: The account to filter by.
Returns:
A new instance of `Directives` with the filtered results.
"""
result: List[ModelDirective] = []
simple = (
models.Open,
models.Close,
models.Balance,
models.Note,
models.Document,
)
for dir in self:
if isinstance(dir, models.Transaction):
if account in [p.account for p in dir.postings]:
result.append(dir)
elif isinstance(dir, simple):
if dir.account == account:
result.append(dir)
elif isinstance(dir, models.Pad):
if dir.account == account or dir.source_account == account:
result.append(dir)
elif isinstance(dir, models.Custom):
for v in dir.values:
if isinstance(v, str):
if v == account:
result.append(dir)
return Directives(__root__=result)
def by_id(self, id: str) -> ModelDirective:
"""Returns the directive with the given ID.
Args:
id: The directive ID.
Raises:
IDNotFoundError: If the given ID was not found.
Returns:
The directive.
"""
id_map = {d.id: d for d in self}
if id not in id_map:
raise IDNotFoundError(f"Failed to find directive with ID: {id}")
return id_map[id]
def by_ids(self, ids: List[str]) -> List[ModelDirective]:
"""Returns a list of directives matching the given ID's.
Args:
ids: A list of ID's to get.
Raises:
IDNotFoundError: If any of the given ID's were not found.
Returns:
A list of the directives.
"""
result = []
for id in ids:
result.append(self.by_id(id))
return result
def by_type(self, ty: Type[T]) -> Directives:
"""Returns a new instance of `Directives` filtered by the given type.
Args:
ty: The type to filter by.
Returns:
A new instance of `Directives` with the filtered results.
"""
return Directives(__root__=super()._by_type(ty))
class Options(Base):
"""A model representing ledger options.
This model wraps the options contained within a ledger. Options which
contain raw beancount types are automatically parsed into their respective
model.
See the docs for more details about each field:
https://beancount.github.io/docs/beancount_options_reference.html
"""
account_current_conversions: Optional[str] = None
account_current_earnings: Optional[str] = None
account_previous_balances: Optional[str] = None
account_previous_conversions: Optional[str] = None
account_previous_earnings: Optional[str] = None
account_rounding: Optional[str] = None
allow_deprecated_none_for_tags_and_links: Optional[bool] = None
allow_pipe_separator: Optional[bool] = None
booking_method: Optional[data.Booking] = None
commodities: Optional[Set[str]] = None
conversion_currency: Optional[str] = None
dcontext: Optional[models.DisplayContext] = None
documents: Optional[List[str]] = None
experiment_explicit_tolerances: Optional[bool] = None
filename: Optional[str] = None
include: Optional[List[str]] = None
infer_tolerance_from_cost: Optional[bool] = None
inferred_tolerance_default: Optional[Dict[str, Decimal]]
inferred_tolerance_multiplier: Optional[Decimal] = None
input_hash: Optional[str] = None
insert_pythonpath: Optional[bool] = None
long_string_maxlines: Optional[int] = None
name_assets: Optional[str] = None
name_equity: Optional[str] = None
name_expenses: Optional[str] = None
name_income: Optional[str] = None
name_liabilities: Optional[str] = None
operating_currency: Optional[List[str]] = None
plugin: Optional[List[str]] = None
plugin_processing_mode: Optional[str] = None
render_commas: Optional[bool] = None
tolerance: Optional[Decimal] = None
title: Optional[str] = None
use_legacy_fixed_tolerances: Optional[bool] = None
class Config:
extra = Extra.allow
@classmethod
def parse(cls, obj: Dict[str, Any]) -> Options:
"""Parses a dictionary of beancount options into this model
Args:
obj: The Beancount options to parse
Returns:
A new instance of this model
"""
d = {}
for key, value in obj.items():
if type(value) in type_map.keys():
d[key] = type_map[type(value)].parse(value)
else:
d[key] = value
return Options(**d)
def export(self) -> Dict[str, Any]:
"""Exports this model into a dictionary of beancount options
Returns:
The dictionary of beancount options
"""
d = {}
for key, value in self.__dict__.items():
if type(value) in type_map.values():
d[key] = value.export() # type: ignore
else:
d[key] = value
return d
class BeancountFile(Base):
"""A model representing the contents of an entire beancount file.
This model provides an interface for accessing the result returned when
loading the contents of a beancount file. Its constructor can be fed the
(entries, errors, options) tuple often returned from loader functions.
Attributes:
entries: The directives parsed from the beancount file.
options: The options parsed from the beancount file.
errors: Any errors generated during parsing.
accounts: A dictionary of account names to `Account` instances
"""
entries: Directives
options: Options
errors: List[Any]
accounts: Dict[str, Account]
@classmethod
def parse(
cls,
obj: Tuple[List[data.Directive], List[Any], Dict[str, Any]],
) -> BeancountFile:
"""Parses the results of loading a beancount file into this model.
Args:
obj: The results from calling the beancount loader
Returns:
A new instance of this model
"""
entries = Directives.parse(obj[0])
errors = obj[1]
options = Options.parse(obj[2])
real = realization.realize(obj[0])
names = [o.account for o in entries.by_type(models.Open)]
accounts = {}
for name in names:
accounts[name] = Account.parse(realization.get(real, name))
return BeancountFile(
entries=entries,
options=options,
errors=errors,
accounts=accounts,
)
@staticmethod
def decompress(data: bytes) -> BeancountFile:
"""Decompresses the given data into a `BeancountFile` instance.
Args:
data: The bytes from an LZMA compressed pickled `BeancountFile`.
Returns:
The decompressed, unpickled `BeancountFile` instance.
"""
return pickle.loads(lzma.decompress(data))
def export(self) -> Tuple[List[data.Directive], List[Any], Dict[str, Any]]:
"""Exports this model into it's original counterpart
Returns:
The entries, errors, and options from the original loader
"""
return (self.entries.export(), self.errors, self.options.export())
def compress(self) -> bytes:
"""Compresses this instance into a byte stream.
Returns:
An LZMA compressed pickle instance of this instance.
"""
return lzma.compress(pickle.dumps(self))
def hash(self) -> str:
"""Generates a unique hash for this `BeancountFile`.
This method uses existing logic provided by the beancount package to
calculate a hash which should be unique to the current state of the
ledger. Specifically, it can be used to ensure that the underlying
files used in parsing have not changed. If this instance was created
dynamically without being fed data from the loader it will fail.
Raises:
FileNotFoundError: If no source files were found
Returns:
An MD5 hash.
"""
if self.options.include:
return loader.compute_input_hash(self.options.include)
elif self.options.filename:
return loader.compute_input_hash([self.options.filename])
else:
raise FileNotFoundError(
"No source files associated with this instance"
)
def query(self, query_str: str) -> QueryResult:
"""Executes the given BQL query against the entries in this file.
Args:
query_str: The BQL query to execute.
Returns:
A `QueryResult` containing the results of the query.
"""
result = query.run_query(
self.entries.export(), self.options.export(), query_str
)
return QueryResult.parse(result)
def realize(self) -> RealAccount:
"""Realizes the entries in this file.
Returns:
The root `RealAccount` from the realization.
"""
root = realization.realize(self.entries.export())
return RealAccount.parse(root)
class IDNotFoundError(Exception):
"""Thrown when a `Directives` instance doesn't contain the given id."""
pass
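# Usage sketch (illustrative, not part of the original file): parse the tuple
# returned by beancount's loader and query it; the file name and account name
# are assumptions:
#
#   from beancount import loader
#   bfile = BeancountFile.parse(loader.load_file("ledger.beancount"))
#   checking = bfile.accounts.get("Assets:Checking")
#   result = bfile.query("SELECT account, sum(position) GROUP BY account")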
```
#### File: bdantic/models/realize.py
```python
from __future__ import annotations
from datetime import date
from typing import Dict, List, Literal, Optional, Type, TypeVar, Union
from beancount.core import data, realization
from pydantic import BaseModel
from .base import Base, BaseList
from .data import Account as AccountName
from .data import Inventory
from .directives import Balance, Close, Document, Note, Open, Pad, TxnPosting
T = TypeVar("T", bound="ModelTxnPosting")
ModelTxnPosting = Union[Balance, Close, Document, Note, Open, Pad, TxnPosting]
BeanTxnPosting = Union[
data.Balance,
data.Close,
data.Document,
data.Note,
data.Open,
data.Pad,
data.TxnPosting,
]
_type_map: Dict[Type[BeanTxnPosting], Type[ModelTxnPosting]] = {
data.Balance: Balance,
data.Close: Close,
data.Document: Document,
data.Note: Note,
data.Open: Open,
data.Pad: Pad,
data.TxnPosting: TxnPosting,
}
class Account(BaseModel):
"""A simplified view of an entire beancount account.
The primary difference between this and a `RealAccount` is that it strips
out all children and directives associated with the account. Additionally,
it adds some useful data about an account, such as its open/close dates. The
removal of the children and directives greatly reduces the size of this
object, especially when serialized.
Attributes:
balance: A mapping of currencies to inventories.
close: The (optional) date the account was closed.
name: The account name.
open: The date the account was opened.
"""
balance: Dict[str, Inventory]
close: Optional[date] = None
name: str
open: date
@staticmethod
def parse(obj: realization.RealAccount) -> Account:
"""Parses a beancount RealAccount into this model
Args:
obj: The Beancount RealAccount
Returns:
A new instance of this model
"""
open_date = None
close_date = None
for dir in obj.txn_postings:
if isinstance(dir, data.Open):
open_date = dir.date
elif isinstance(dir, data.Close):
close_date = dir.date
split = obj.balance.split()
map = {}
for k, v in split.items():
map[k] = Inventory.parse(v)
return Account(
balance=map,
close=close_date,
open=open_date,
name=obj.account,
)
@staticmethod
def from_real(ra: RealAccount) -> Account:
"""Creates a new instance of `Account` using details from a
[RealAccount][bdantic.models.realize.RealAccount].
Args:
ra: The RealAccount to use
Returns:
A new instance of Account
"""
open = ra.txn_postings.by_type(Open)
assert open is not None
assert len(open) == 1
close = ra.txn_postings.by_type(Close)
if close:
assert len(close) < 2
close_date = close[0].date
else:
close_date = None
return Account(
balance=ra.cur_map,
close=close_date,
open=open[0].date,
name=ra.account,
)
def export(self):
raise NotImplementedError
class RealAccount(Base, smart_union=True):
"""A model representing a `beancount.core.realize.RealAccount`.
A `RealAccount` is represented as a dictionary in beancount which contains
additional attributes for describing details about the account. This model
matches those details; however, the dictionary representation of a
`RealAccount` is moved to the dedicated `children` field.
Attributes:
ty: A string literal identifying this model.
account: The account name.
balance: The balance of the account
children: All children that belong to this account.
cur_map: A map of currencies to their respective balances.
txn_postings: A list of directives in which this account appears.
"""
ty: Literal["RealAccount"] = "RealAccount"
account: AccountName
balance: Inventory
children: Dict[str, RealAccount]
cur_map: Dict[str, Inventory]
txn_postings: TxnPostings
@classmethod
def parse(cls, obj: realization.RealAccount) -> RealAccount:
"""Parses a beancount RealAccount into this model
Args:
obj: The Beancount RealAccount
Returns:
A new instance of this model
"""
children = {}
for k, v in obj.items():
children[k] = RealAccount.parse(v)
split = obj.balance.split()
map = {}
for k, v in split.items():
map[k] = Inventory.parse(v)
return RealAccount(
account=obj.account,
balance=Inventory.parse(obj.balance),
children=children,
cur_map=map,
txn_postings=TxnPostings.parse(obj.txn_postings), # type: ignore
)
def export(self) -> realization.RealAccount:
"""Exports this model into a beancount RealAccount
Returns:
A new instance of a beancount RealAccount
"""
ra = realization.RealAccount(self.account)
for k, v in self.children.items():
ra[k] = v.export()
ra.txn_postings = self.txn_postings.export() # type: ignore
ra.balance = self.balance.export()
return ra
def get(self, account_name: str) -> Optional[RealAccount]:
"""Fetches a nested child account from this `RealAccount` instance.
Args:
account_name: The account to fetch.
Returns:
The `RealAccount` instance if the account exists, otherwise None.
"""
account = self
try:
for key in account_name.split(":"):
account = account.children[key]
except KeyError:
return None
return account
def to_account(self) -> Account:
"""Converts this RealAccount into an Account instance.
Returns:
A new Account instance
"""
return Account.from_real(self)
class TxnPostings(BaseList):
"""A model representing the txnpostings found within RealAccount's."""
__root__: List[ModelTxnPosting]
@classmethod
def parse(
cls,
obj: List[BeanTxnPosting],
) -> TxnPostings:
return TxnPostings(
__root__=[_type_map[type(d)].parse(d) for d in obj] # type: ignore
)
def export(self) -> List[BeanTxnPosting]:
return [d.export() for d in self.__root__]
def by_type(self, ty: Type[T]) -> TxnPostings:
"""Returns a new instance of `TxnPostings` filtered by the given type.
Args:
ty: The type to filter by.
Returns:
A new instance of `TxnPostings` with the filtered results.
"""
return TxnPostings(__root__=super()._by_type(ty))
# Update forward references
Account.update_forward_refs()
RealAccount.update_forward_refs()
TxnPostings.update_forward_refs()
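# Usage sketch (illustrative, not part of the original file): realize a parsed
# ledger and drill into a nested account; the names are assumptions:
#
#   root = bfile.realize()                       # bfile: a BeancountFile
#   checking = root.get("Assets:US:Checking")    # RealAccount or None
#   summary = checking.to_account() if checking else None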
```
#### File: bdantic/tests/base_test.py
```python
from datetime import date
from decimal import Decimal
from beancount.core import amount, data
from bdantic.models import base
from bdantic.models import data as mdata
from bdantic.models import directives
def test_recursive_parse():
txn = data.Transaction(
meta={
"filename": "test.beancount",
"lineno": 123,
},
date=date.today(),
flag="*",
payee="test",
narration="test",
tags=None,
links=None,
postings=[
data.Posting(
account="Test",
units=amount.Amount(number=Decimal(1.50), currency="USD"),
cost=None,
price=None,
flag=None,
meta={},
)
],
)
expected = {
"meta": {
"filename": "test.beancount",
"lineno": 123,
},
"date": date.today(),
"flag": "*",
"payee": "test",
"narration": "test",
"tags": None,
"links": None,
"postings": [
{
"account": "Test",
"units": {
"number": Decimal(1.50),
"currency": "USD",
},
"cost": None,
"price": None,
"flag": None,
"meta": {},
}
],
}
result = base.recursive_parse(txn)
assert result == expected
def test_recursive_export():
txn = directives.Transaction(
id="",
meta={
"filename": "test.beancount",
"lineno": 123,
},
date=date.today(),
flag="*",
payee="test",
narration="test",
tags=None,
links=None,
postings=[
directives.Posting(
account="Test",
units=mdata.Amount(number=Decimal(1.50), currency="USD"),
cost=None,
price=None,
flag=None,
meta={},
)
],
)
expected = {
"meta": {
"filename": "test.beancount",
"lineno": 123,
},
"date": date.today(),
"flag": "*",
"payee": "test",
"narration": "test",
"tags": None,
"links": None,
"postings": [
data.Posting(
account="Test",
units=amount.Amount(number=Decimal(1.50), currency="USD"),
cost=None,
price=None,
flag=None,
meta={},
)
],
}
result = base.recursive_export(txn, base._IGNORE_FIELDS)
assert result == expected
```
#### File: bdantic/tests/conftest.py
```python
import datetime
import hashlib
import io
import os
import pickle
import random
from typing import Any, List, Type
import pytest
from beancount import loader
from beancount.core import data, distribution
from beancount.scripts import example # type: ignore
from pydantic import BaseModel
from bdantic import models, types
from bdantic.models import base
class Ctx(BaseModel):
"""Holds contextual information when comparing values.
Attributes:
recurse: The types of objects to recursively compare
"""
recurse: List[Type] = [
models.Amount,
models.Close,
models.Cost,
models.CostSpec,
models.CurrencyContext,
models.DisplayContext,
distribution.Distribution,
models.Distribution,
base.Meta,
models.Open,
models.Posting,
models.Position,
models.Transaction,
models.TxnPosting,
]
def is_recurse(self, obj1: Any, obj2: Any) -> bool:
"""Returns whether or not the two objects should be recursed.
Args:
obj1: An object to check
obj2: An object to check
Returns:
True if the objects should be recursed, False otherwise
"""
return type(obj1) in self.recurse or type(obj2) in self.recurse
def compare(self, obj1: Any, obj2: Any, partial: bool = True) -> None:
"""Compares two objects by asserting equality.
The type of comparison performed is dependent on the types of the
passed objects and information contained within the context. Objects
that are recursible will have their attributes compared, dictionaries
and lists will be iterated over to find recursible objects and
keys/values will be asserted to be equal, all other types will be
asserted to be equal to each other.
Args:
obj1: The first object to compare
obj2: The second object to compare
partial: If True, allows objects to have different attributes.
Raises:
AssertionError when an equality check fails
"""
if hasattr(obj1, "__root__"):
obj1 = obj1.__root__
elif hasattr(obj2, "__root__"):
obj2 = obj2.__root__
if self.is_recurse(obj1, obj2):
self.compare_object(obj1, obj2, partial)
elif isinstance(obj1, dict) and isinstance(obj2, dict):
self.compare_dict(obj1, obj2)
elif isinstance(obj1, list) and isinstance(obj2, list):
self.compare_list(obj1, obj2)
elif isinstance(obj1, tuple) and isinstance(obj2, tuple):
self.compare_list(obj1, obj2)
else:
assert obj1 == obj2
def compare_dict(self, dict1, dict2, partial: bool = True) -> None:
"""Compares two dictionaries, asserting they are equal.
Args:
dict1: The first dictionary to compare
dict2: The second dictionary to compare
partial: If True, allows objects to have different attributes.
Raises:
AssertionError when an equality check fails
"""
assert not set(dict1.keys()).difference(set(dict2.keys()))
for key in dict1:
self.compare(dict1[key], dict2[key], partial)
def compare_list(self, list1, list2, partial: bool = True) -> None:
"""Compares two lists, asserting they are equal.
Args:
list1: The first list to compare
list2: The second list to compare
partial: If True, allows objects to have different attributes.
Raises:
AssertionError when an equality check fails
"""
assert len(list1) == len(list2)
for i in range(len(list1)):
self.compare(list1[i], list2[i], partial)
def compare_object(
self, obj1: Any, obj2: Any, partial: bool = True
) -> None:
"""Compares two objects, asserting they are equal.
Objects are compared by iterating over their attributes and asserting
equality. If the context is set to partial, only attributes which the
two objects share will be asserted equal. Attributes which are lists or
dictionaries will be recursively checked. Nested objects are only
recursed if their types are found in the recurse attribute of the given
context.
Args:
obj1: The first object to compare
obj2: The second object to compare
partial: If True, allows objects to have different attributes.
Raises:
AssertionError when an equality check fails
"""
def is_valid_attr(k: str, obj: Any) -> bool:
if k.startswith("__"):
return False
elif callable(getattr(obj, k)):
return False
return True
attr1 = set([attr for attr in dir(obj1) if is_valid_attr(attr, obj1)])
attr2 = set([attr for attr in dir(obj2) if is_valid_attr(attr, obj2)])
if not partial:
assert not attr1.difference(
attr2
), "Objects have dissimilar attributes"
attrs = attr1
else:
attrs = attr1.intersection(attr2)
for attr in attrs:
val1 = getattr(obj1, attr)
val2 = getattr(obj2, attr)
self.compare(val1, val2, partial)
@pytest.fixture(scope="session")
def ctx() -> Ctx:
return Ctx()
@pytest.fixture(scope="session")
def beanfile() -> tuple[list[data.Directive], list, dict[str, Any]]:
end = datetime.date.today()
start_offset = random.randrange(2, 10)
start_month = random.randrange(1, 12)
start_day = random.randrange(1, 28)
start = datetime.date(end.year - start_offset, start_month, start_day)
birth_offset = random.randrange(20, 40)
birth_month = random.randrange(1, 12)
birth_day = random.randrange(1, 28)
birth = datetime.date(end.year - birth_offset, birth_month, birth_day)
with io.StringIO() as s:
example.write_example_file(birth, start, end, True, s)
s.seek(0)
return loader.load_string(s.read())
@pytest.fixture(scope="session")
def syntax() -> list[tuple[str, type[types.ModelDirective]]]:
balance = "2022-01-01 balance Assets:US:BofA:Checking 2845.77 USD"
close = "2022-01-01 close Equity:Opening-Balances"
commodity = """2022-01-01 commodity USD
export: "CASH"
name: "<NAME>" """
document = f"""
2022-01-01 document Assets:US:Vanguard:Cash "{os.getcwd()}/test.doc" """
event = """2022-01-01 event "location" "Paris, France" """
note = """2022-01-01 note Liabilities:Credit "Called about fraudulence" """
open = """2022-01-01 open Liabilities:Credit:CapitalOne USD"""
pad = "2022-01-01 pad Assets:BofA:Checking Equity:Opening-Balances"
price = "2022-01-01 price HOOL 579.18 USD"
query = """2022-01-01 query "france-balances" "
SELECT account, sum(position) WHERE `trip-france-2014` in tags" """
transaction = """2022-01-01 * "Investing 40% of cash in VBMPX"
Assets:US:Vanguard:VBMPX 1.122 VBMPX {213.90 USD, 2022-01-01}
Assets:US:Vanguard:Cash -240.00 USD"""
return [
(balance, models.Balance),
(close, models.Close),
(commodity, models.Commodity),
(document, models.Document),
(event, models.Event),
(note, models.Note),
(open, models.Open),
(pad, models.Pad),
(price, models.Price),
(query, models.Query),
(transaction, models.Transaction),
]
def hash(obj) -> str:
"""Hashes the given object.
Args:
obj: The object to hash.
Returns:
An MD5 hash of the object.
"""
return hashlib.md5(pickle.dumps(obj)).hexdigest()
``` |
{
"source": "jmgilman/beancount-example",
"score": 2
} |
#### File: app/tests/conftest.py
```python
import pytest
from aiohttp import web
from app import main
@pytest.fixture
def cli(event_loop, aiohttp_client):
app = web.Application()
app.router.add_get("/", main.serve)
return event_loop.run_until_complete(aiohttp_client(app))
```
#### File: beancount-example/tests/test_smoke.py
```python
import requests # type: ignore
def test_smoke(app):
port = app.ports["8001/tcp"][0]
url = f"http://localhost:{port}/"
result = requests.get(url)
content = result.text
assert result.status_code == 200
assert len(content) > 0
result = requests.get(url)
new_content = result.text
assert result.status_code == 200
assert content == new_content
result = requests.get(f"{url}?reset")
content = result.text
assert result.status_code == 200
assert content != new_content
``` |
{
"source": "jmgilman/beancount-hypothesis",
"score": 4
} |
#### File: beancount-hypothesis/beancount_hypothesis/account.py
```python
import random
from dataclasses import dataclass, field
from hypothesis import strategies
from random_words import RandomWords # type: ignore
@dataclass
class AccountGenerator:
"""A class for generating semi-realistic account structures.
This class provides a single method, generate(), which will create a list
of nested accounts. The final result is intended to look semi-realistic in
the sense that account names use real words and typically have one or more
subaccounts (except for the leaves). The class attributes can be modified
in order to control how the structure is generated.
Attributes:
min_leaves: The minimum number of leaves to generate for each node
max_leaves: The maximum number of leaves to generate for each node
min_nodes: The minimum number of nodes to generate for each leaf
max_nodes: The max number of nodes to generate for each leaf
"""
min_leaves: int = 1
max_leaves: int = 3
min_nodes: int = 3
max_nodes: int = 5
rw: RandomWords = field(default_factory=RandomWords)
def generate(self) -> list[str]:
"""Generates a list of semi-realistic account names.
Example:
[
"Inquiry:Bars",
"Inquiry:Entrance",
"Inquiry:Introduction",
"Successes:Armament",
"Successes:Rail:Flares",
"Successes:Rail:Spindle",
"Successes:Rail:Ones",
"Successes:Presses",
"Successes:Waste:Hugs",
"Successes:Waste:Catcher",
"Successes:Waste:Signs",
"Beams:Failure",
"Beams:Cylinder",
"Beams:Kiss",
"Beams:Diseases"
]
"""
accounts: list[str] = []
for segments in _walk_dict(self._make_tree()):
accounts.append(":".join(segments))
return accounts
def _make_tree(self, depth=0):
"""Generates a nested tree structure using random words as keys."""
if depth >= self.max_leaves:
return None
names = self._rand_words()
d = dict.fromkeys(names)
for name in names:
d[name] = self._make_tree(depth + self._rand_leave())
return d
def _rand_leave(self) -> int:
"""Generates a random number of leaves to generate."""
if self.min_leaves == self.max_leaves:
return 1
else:
return random.randrange(self.min_leaves, self.max_leaves)
def _rand_node(self) -> int:
"""Generates a random number of nodes to generate."""
if self.min_nodes == self.max_nodes:
return self.min_nodes
else:
return random.randrange(self.min_nodes, self.max_nodes)
def _rand_words(self) -> list[str]:
"""Generates a random number of words as configured by the class."""
return [
w.capitalize()
for w in self.rw.random_words(count=self._rand_node())
]
@strategies.composite
def account_name(_) -> str:
"""Generates a random account name.
Returns:
A random account name.
"""
ag = AccountGenerator(min_nodes=1, max_nodes=1, min_leaves=3, max_leaves=3)
return ag.generate()[0]
def _walk_dict(d: dict, pre: list | None = None):
"""Walks the keys of the given dictionary, returning leaves as lists.
This function will recursively walk a nested dictionary and generate a list
of keys for all leaves contained within the nested structure. The given
structure should only contain nested dictionaries and leaf values are
discarded as this function is only concerned with dictionary keys.
Args:
d: The dictionary to walk
pre: Used in recursion
Yields:
Lists for each leaf contained within the structure.
For example:
{
"one":{
"two":{
"three": None
}
},
"four":{
"five":{
"six": None
}
}
}
Would yield:
['one', 'two', 'three']
['four', 'five', 'six']
"""
pre = pre[:] if pre else []
if isinstance(d, dict):
for key, value in d.items():
if isinstance(value, dict):
for d in _walk_dict(value, pre + [key]):
yield d
else:
yield pre + [key]
else:
yield pre + [d]
```
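A minimal usage sketch for the generator above; the bound values and the import path `beancount_hypothesis.account` are assumptions based on the file layout, not requirements of the library:
```python
# Usage sketch for AccountGenerator; bounds and import path are assumptions.
from beancount_hypothesis.account import AccountGenerator, account_name

generator = AccountGenerator(min_leaves=1, max_leaves=2, min_nodes=2, max_nodes=4)
for name in generator.generate():
    print(name)  # e.g. "Successes:Rail:Flares"

# account_name() is a hypothesis strategy, so it can also drive property tests.
print(account_name().example())
```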
#### File: beancount-hypothesis/tests/test_directive.py
```python
import string
from beancount_hypothesis import directive
def assert_account(account: str):
assert len(account.split(":")) == 3
def assert_currency(currency: str):
assert len(currency) == 3
assert all([c in string.ascii_uppercase for c in currency])
def assert_filename(filename: str):
assert len(filename.split("/")) > 1
assert len(filename.split(".")[1]) == 3
def test_meta():
result = directive.meta().example()
assert isinstance(result["filename"], str)
assert isinstance(result["lineno"], int)
def test_balance():
result = directive.balance().example()
assert_account(result.account)
def test_close():
result = directive.close().example()
assert_account(result.account)
def test_commodity():
result = directive.commodity().example()
assert_currency(result.currency)
result = directive.commodity(["USD", "CAD"]).example()
assert result.currency in ["USD", "CAD"]
def test_custom():
result = directive.custom().example()
assert all([isinstance(v, str) for v in result.values])
def test_document():
result = directive.document().example()
assert_filename(result.filename)
def test_event():
directive.event().example()
def test_note():
result = directive.note().example()
assert_account(result.account)
def test_open():
result = directive.open().example()
assert_account(result.account)
[assert_currency(c) for c in result.currencies]
def test_pad():
result = directive.pad().example()
assert_account(result.account)
assert_account(result.source_account)
def test_posting():
directive.posting().example()
def test_price():
result = directive.price().example()
assert_currency(result.currency)
result = directive.price(["USD", "CAD"]).example()
assert result.currency in ["USD", "CAD"]
def test_query():
directive.query().example()
def test_transaction():
result = directive.transaction().example()
assert result.flag == "*"
assert len(result.narration.split(" ")) > 1
assert len(result.postings) <= 6
``` |
{
"source": "jmgilman/fapi",
"score": 3
} |
#### File: core/auth/jwt.py
```python
from app.core import base
from fastapi import Request
from pydantic import BaseModel
import jwt
class JWTConfig(BaseModel):
"""Configuration class for configuring JWT authentication
Attributes:
algorithms: A comma separated list of approved algorithms to use
audience: The API audience
jwks: The URL to a JWKS endpoint for fetching keys
issuer: The token issuer
"""
algorithms: str = "RS256"
audience: str = ""
jwks: str = ""
issuer: str = ""
class JWTAuth(base.BaseAuth):
"""Provides an interface for authenticating requests with JWT tokens."""
def authenticate(self, request: Request) -> bool:
assert self.settings.jwt is not None
header: str = request.headers.get("Authorization", None)
if not header:
return False
try:
token: str = header.split("Bearer ")[1]
except IndexError:
return False
signing_key = self.client().get_signing_key_from_jwt(token).key
try:
jwt.decode(
token,
signing_key,
algorithms=self.settings.jwt.algorithms.split(", "),
audience=self.settings.jwt.audience,
issuer=self.settings.jwt.issuer,
)
except jwt.exceptions.DecodeError:
return False
return True
def client(self) -> jwt.PyJWKClient:
"""Creates a new `PyJWKClient` instance.
Returns:
A configured `PyJWKClient` instance.
"""
assert self.settings.jwt is not None
return jwt.PyJWKClient(self.settings.jwt.jwks)
@staticmethod
def validate(settings) -> None:
if settings.jwt is None:
raise base.ValidationError(
"Must set environment variables for JWT"
)
elif not settings.jwt.audience:
raise base.ValidationError(
"Must set the JWT audience environment variable"
)
elif not settings.jwt.jwks:
raise base.ValidationError(
"Must set the JWT JWKS environment variable"
)
elif not settings.jwt.issuer:
raise base.ValidationError(
"Must set the issuer environment variable"
)
```
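A sketch of how this provider might be wired up, assuming the module lives at `app.core.auth.jwt` and that `Settings` accepts a `jwt` field (the code above only shows that it reads `settings.jwt`):
```python
# Hypothetical wiring of JWTAuth; the Settings constructor arguments are assumptions.
from app.core.auth.jwt import JWTAuth, JWTConfig
from app.core.settings import Settings

settings = Settings(
    jwt=JWTConfig(
        audience="my-api",
        jwks="https://issuer.example.com/.well-known/jwks.json",
        issuer="https://issuer.example.com/",
    )
)
JWTAuth.validate(settings)  # raises ValidationError if the config is incomplete
auth = JWTAuth(settings)
# Inside a FastAPI dependency, `request` is the incoming fastapi.Request:
# authenticated = auth.authenticate(request)
```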
#### File: app/core/base.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING
from bdantic import models
from fastapi import Request
if TYPE_CHECKING:
from app.core.settings import Settings
class BaseAuth:
"""Base class for authentication providers.
Attributes:
settings: An instance of `Settings` containing the configured settings.
"""
settings: Settings
def __init__(self, settings: Settings):
self.settings = settings
def authenticate(self, request: Request) -> bool:
"""Authenticates the given request using the configured settings.
Args:
request: The HTTP request.
Returns:
True if authenticated, false otherwise.
"""
pass
@staticmethod
def validate(settings: Settings) -> None:
"""Validates that the provided settings are complete.
Args:
settings: The settings to validate.
Raises:
ValidationError: If the given settings fail to validate.
"""
pass
class BaseStorage:
"""Base class for storage providers.
Attributes:
settings: An instance of `Settings` containing the configured settings.
"""
settings: Settings
def __init__(self, settings: Settings):
self.settings = settings
def load(self) -> models.BeancountFile:
"""Returns a new instance of `BeancountFile` with the loaded ledger.
Returns:
A `BeancountFile` instance with the loaded ledger contents.
"""
pass
def changed(self, bf: models.BeancountFile) -> bool:
"""Returns if the underlying storage has changed.
Args:
bf: The `BeancountFile` instance used to compare for changes.
Returns:
True if a change is detected, False otherwise.
"""
pass
class ValidationError(Exception):
"""Raised when configured settings fail to validate."""
pass
```
#### File: app/core/cache.py
```python
import asyncio
from dataclasses import dataclass
import cachetools
from anyio import Lock
from app.core import base
from bdantic import models
from loguru import logger
@dataclass
class Cache(cachetools.Cache):
"""A cache for storing a `BeancountFile`.
This class provides global access to a cached instance of `BeancountFile`
which can be reused across requests. An invalidator method is automatically
run on startup and is responsible for invalidating request when the
underlying storage changes.
Attributes:
interval: Frequency that the invalidator should check the storage.
storage: The underlying storage being used.
"""
interval: int
lock: Lock
storage: base.BaseStorage
_value: models.BeancountFile
def __init__(self, storage: base.BaseStorage, interval: int = 5):
super().__init__(50)
self.storage = storage
self.interval = interval
self.lock = Lock()
async def beanfile(self) -> models.BeancountFile:
async with self.lock:
return self["beanfile"]
async def load(self):
logger.info("Loading cache data")
async with self.lock:
self["beanfile"] = self.storage.load()
logger.info("Cache data successfully loaded")
async def background(self):
"""An async loop for managing the cache."""
# Prime the cache
logger.info("Priming cache")
await self.load()
logger.info("Entering main cache loop")
while True:
# Check for state changes
if self.storage.changed(self["beanfile"]):
logger.info("Cache invalidated")
await self.load()
await asyncio.sleep(self.interval)
```
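A sketch of how the cache is intended to be used during application startup; the `storage` argument and the use of `asyncio.create_task` are assumptions about the surrounding app, not shown in the file above:
```python
# Startup wiring sketch; `storage` is any BaseStorage implementation.
import asyncio

from app.core.cache import Cache


async def startup(storage) -> None:
    cache = Cache(storage, interval=5)
    await cache.load()                       # prime the cache before serving
    asyncio.create_task(cache.background())  # keep it fresh in the background
    beanfile = await cache.beanfile()        # cached BeancountFile instance
```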
#### File: core/storage/s3.py
```python
import os
from pathlib import Path
from typing import Any
import boto3 # type: ignore
from app.core import base, beancount
from bdantic import models
from loguru import logger
from pydantic import BaseModel
class S3Config(BaseModel):
"""Configuration class for Amazon S3 support.
Attributes:
bucket: The S3 bucket name to download ledger files from
"""
bucket: str = ""
class S3Storage(base.BaseStorage):
"""Provides an interface for fetching Beancount ledger files from Amazon S3.
This class expects the main ledger file as well as all supporting ledger
files to be contained within a single S3 bucket. No filtering is done when
inspecting the bucket and therefore all files contained within the bucket
will be downloaded to the working directory specified via the settings.
Attrs:
bucket: An instance of a S3 bucket
"""
bucket: Any = None
def __init__(self, settings):
super().__init__(settings)
if not self.settings.s3.bucket:
raise base.ValidationError(
"Must set the S3 bucket environment variable"
)
self.bucket = boto3.resource("s3").Bucket(self.settings.s3.bucket)
def load(self) -> models.BeancountFile:
assert self.settings.s3 is not None
logger.info(
f"Downloading objects from {self.settings.s3.bucket} bucket"
)
Path(self.settings.work_dir).mkdir(parents=True, exist_ok=True)
for object in self.bucket.objects.all():
logger.info(f"Downloading {object.key}")
self._download(object.key)
logger.info(f"Loading data from {self.settings.entry_path()}")
return beancount.from_file(self.settings.entry_path())
def changed(self, _: models.BeancountFile) -> bool:
# TODO: Add support for cache invalidation
return False
@staticmethod
def validate(settings) -> None:
if settings.s3 is None:
raise base.ValidationError("Must set environment variables for S3")
elif not settings.s3.bucket:
raise base.ValidationError(
"Must set the S3 bucket environment variable"
)
def _download(self, key: str) -> None:
"""Downloads the given object to the configured working directory.
Args:
key: The key of the object to download
"""
file_path = os.path.join(self.settings.work_dir, key)
file_dir = os.path.dirname(file_path)
Path(file_dir).mkdir(parents=True, exist_ok=True)
self.bucket.download_file(key, file_path)
```
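A usage sketch mirroring the settings used in the test fixture later in this collection; the AWS credential handling is boto3's, and the field names are assumptions drawn from that fixture:
```python
# Usage sketch for S3Storage; field names mirror the test fixture's Settings.
from app.core import settings
from app.core.storage import s3

cfg = settings.Settings(
    entrypoint="main.beancount",
    s3=s3.S3Config(bucket="my-ledger-bucket"),
    work_dir="/tmp/ledger",
)
s3.S3Storage.validate(cfg)   # raises ValidationError if the bucket is unset
storage = s3.S3Storage(cfg)  # boto3 reads AWS credentials from the environment
beanfile = storage.load()    # downloads the bucket and parses the entrypoint
```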
#### File: api/v1/test_account.py
```python
from fastapi.testclient import TestClient
def test_accounts(client: TestClient, raw_accounts):
response = client.get("/account")
assert response.status_code == 200
assert response.json() == raw_accounts
def test_account(client: TestClient, raw_accounts):
expected = list(raw_accounts.values())[0]
name = expected["name"]
response = client.get(f"/account/{name}")
assert response.status_code == 200
assert response.json()["name"] == name
assert response.json()["open"] == expected["open"]
assert response.json()["balance"] == expected["balance"]
response = client.get(f"/account/{name}123")
assert response.status_code == 404
def test_balance(client: TestClient, raw_accounts):
expected = list(raw_accounts.values())[0]
name = expected["name"]
response = client.get(f"/account/{name}/balance")
assert response.status_code == 200
assert response.json() == expected["balance"]
response = client.get(f"/account/{name}123/balance")
assert response.status_code == 404
def test_transactions(client: TestClient, raw_accounts, raw_entries):
account = list(raw_accounts.values())[0]
name = account["name"]
txns = [e for e in raw_entries if e["ty"] == "Transaction"]
expected = list(
filter(
lambda t: any(p["account"] == name for p in t["postings"]), txns
)
)
response = client.get(f"/account/{name}/transactions")
assert response.status_code == 200
assert response.json() == expected
response = client.get(f"/account/{name}123/transactions")
assert response.status_code == 404
```
#### File: api/v1/test_query.py
```python
from unittest import mock
from fastapi.testclient import TestClient
@mock.patch("bdantic.models.file.BeancountFile.query")
def test_query(query, client: TestClient, query_response):
query.return_value = query_response
response = client.get("/query?bql=test")
assert response.json() == query_response
query.assert_called_once_with("test")
```
#### File: core/storage/test_s3.py
```python
from unittest.mock import Mock, patch
import pytest
from app.core import settings
from app.core.storage import s3
@pytest.fixture
def mock_settings():
return settings.Settings(
entrypoint="test.beancount",
s3=s3.S3Config(bucket="test"),
work_dir="/run",
)
@patch("pathlib.Path.mkdir")
@patch("pathlib.Path.__init__")
def test_download(path_init, _, mock_settings):
bucket = Mock()
loader = s3.S3Storage(mock_settings)
loader.bucket = bucket
path_init.return_value = None
loader._download("test/key.file")
path_init.assert_called_once_with("/run/test")
bucket.download_file.assert_called_once_with(
"test/key.file", "/run/test/key.file"
)
@patch("app.core.beancount.from_file")
@patch("pathlib.Path.mkdir")
@patch("pathlib.Path.__init__")
def test_load(path_init, _, from_file, mock_settings):
bucket = Mock()
loader = s3.S3Storage(mock_settings)
loader.bucket = bucket
path_init.return_value = None
from_file.return_value = "file"
object = Mock()
object.key = "test/key.file"
bucket.objects.all.return_value = [object]
loader.load()
path_init.assert_any_call("/run")
bucket.objects.all.assert_called_once()
bucket.download_file.assert_called_once_with(
"test/key.file", "/run/test/key.file"
)
``` |
{
"source": "jmgilman/nox-helpers",
"score": 3
} |
#### File: nox-helpers/nox_helpers/linting.py
```python
from dataclasses import dataclass
import nox
from nox_helpers.tooling import Ctx, Tool
@dataclass
class Linter(Tool):
"""A tool which is used for linting source code.
This class provides a loose interface for tools which fall under the
category of linters.
"""
def lint(self, session: nox.Session, files: list[str], ctx: Ctx = Ctx()):
"""Lints the given list of files.
Args:
session: The session in which to run this tool.
files: A list of files to lint.
"""
pass
@dataclass
class Bandit(Linter):
"""A class for interacting with the bandit linter.
Args:
config: An optional configuration file to pass with all executions.
"""
binary: str = "bandit"
config: str = ""
def run(self, session: nox.Session, ctx: Ctx = Ctx()):
if self.config:
ctx += Ctx(flags=["--config", self.config])
super().run(session, ctx)
def lint(self, session: nox.Session, files: list[str], ctx: Ctx = Ctx()):
ctx += Ctx(args=files)
self.run(session, ctx)
@dataclass
class Flake8(Linter):
"""A class for interacting with the flake8 linter.
Args:
config: An optional configuration file to pass with all executions.
"""
binary: str = "flake8"
config: str = ""
def run(self, session: nox.Session, ctx: Ctx = Ctx()):
if self.config:
ctx += Ctx(flags=["--config", self.config])
super().run(session, ctx)
def lint(self, session: nox.Session, files: list[str], ctx: Ctx = Ctx()):
ctx += Ctx(args=files)
self.run(session, ctx)
@dataclass
class Mypy(Linter):
"""A class for interacting with the mypy linter.
Args:
config: An optional configuration file to pass with all executions.
"""
binary: str = "mypy"
config: str = ""
def run(self, session: nox.Session, ctx: Ctx = Ctx()):
if self.config:
ctx += Ctx(flags=["--config-file", self.config])
super().run(session, ctx)
def lint(self, session: nox.Session, files: list[str], ctx: Ctx = Ctx()):
ctx += Ctx(args=files)
self.run(session, ctx)
```
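A minimal `noxfile.py` sketch showing how these linters combine with the `Tools` helper defined in `tooling.py` below; the file list and config path are illustrative placeholders:
```python
# noxfile.py sketch; paths and config values are illustrative placeholders.
import nox

from nox_helpers import linting, tooling

LINTERS = tooling.Tools([linting.Flake8(config="setup.cfg"), linting.Mypy()])


@nox.session
def lint(session: nox.Session) -> None:
    LINTERS.setup(session)  # installs flake8, mypy and any declared deps
    for linter in LINTERS:
        linter.lint(session, ["src", "tests"])
```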
#### File: nox-helpers/nox_helpers/tooling.py
```python
from __future__ import annotations
from dataclasses import dataclass, field
from typing import TypeVar
import nox
T = TypeVar("T", bound="Tool")
@dataclass
class Ctx:
"""Context information passed when running a tool.
This class serves as the primary pipeline for getting data from the initial
call context all the way down to invoking session.run() with a Nox session.
It breaks up the normal run() input into a combination of flags, arguments,
and environment variables. The addition operator is defined to allow adding
two Ctx's together. In this case, the RHS flags/args are added after the
LHS flags/args. For example:
ctx = Ctx(flags=["--flag1"])
ctx += Ctx(flags=["--flag2"])
assert ctx.flags == ["--flag1", "--flag2"]
The constructor supports ingesting additional arbitrary keyword arguments.
These are also additive and will be passed down to session.run().
Attributes:
flags: Command-line flags to pass
args: Command-line arguments to pass
env: Environment variables to be set
"""
flags: list[str]
args: list[str]
env: dict[str, str]
def __init__(
self,
flags: list[str] = [],
args: list[str] = [],
env: dict[str, str] = {},
**kwargs,
):
self.flags = flags
self.args = args
self.env = env
self.kwargs = kwargs
def __add__(self, other: Ctx):
return Ctx(
flags=self.flags + other.flags,
args=self.args + other.args,
env=self.env | other.env,
**(self.kwargs | other.kwargs),
)
@dataclass
class Tool:
"""An arbitrary tool used in a nox configuration.
This class serves as the base class from which all tools inherit from. It
provides methods for installing the tool and it's dependencies as well as
running arbitrary commands using the tool. Many tools are provided out of
the box by this package, however, additional ones can be created by
inheriting from this class.
Attributes:
binary: The name of the binary for this tool.
deps: Additional dependencies required to run this tool.
flags: A list of flags that are always included in executions.
"""
binary: str
deps: list[str] = field(default_factory=list)
flags: list[str] = field(default_factory=list)
def run(self, session: nox.Session, ctx: Ctx = Ctx()):
"""Runs this tool using the session and context.
Args:
session: The session to run this tool in.
ctx: The context to use for running the tool.
"""
ctx += Ctx(flags=self.flags)
session.run(
self.binary, *ctx.flags, *ctx.args, env=ctx.env, **ctx.kwargs
)
def setup(self, session: nox.Session, **kwargs) -> None:
"""Installs this tool and any declared dependencies using the session.
Args:
session: The session to install this tool in.
"""
session.install(self.binary, *self.deps, **kwargs)
class Tools(list[T]):
"""A list of Tool's.
This class provides a thin wrapper around a list of Tool objects. It
provides a single method for performing setup on all tools contained in the
list.
"""
def setup(self, session: nox.Session, **kwargs) -> None:
"""Installs all tools contained in this list using the session.
Args:
session: The session to install the tools in.
"""
for tool in self:
tool.setup(session, **kwargs)
```
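Because `Tool` is a plain dataclass, new tools can be defined by subclassing it; the `pytest` wrapper below is an illustrative assumption, not something shipped by the package:
```python
# Sketch of a custom tool built on Tool/Ctx; not part of nox-helpers itself.
from dataclasses import dataclass

import nox

from nox_helpers.tooling import Ctx, Tool


@dataclass
class Pytest(Tool):
    binary: str = "pytest"

    def test(self, session: nox.Session, paths: list[str], ctx: Ctx = Ctx()):
        ctx += Ctx(args=paths)
        self.run(session, ctx)


@nox.session
def tests(session: nox.Session) -> None:
    pytest = Pytest(deps=["pytest-cov"], flags=["-x"])
    pytest.setup(session)
    pytest.test(session, ["tests"])
```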
#### File: nox-helpers/tests/test_formatting.py
```python
from unittest import mock
from nox_helpers import formatting, tooling
def test_black_run(session: mock.MagicMock):
b = formatting.Black(config="test.conf")
ctx = tooling.Ctx()
b.run(session, ctx)
session.run.assert_called_once_with(
"black", "--config", "test.conf", env={}
)
def test_black_setup(session: mock.MagicMock):
b = formatting.Black(deps=["dep1"])
b.setup(session)
session.install.assert_called_once_with("black", "dep1")
def test_black_check(session: mock.MagicMock):
b = formatting.Black()
ctx = tooling.Ctx()
b.check(session, ["file1", "file2"], ctx)
session.run.assert_called_once_with(
"black", "--check", "file1", "file2", env={}
)
def test_black_format(session: mock.MagicMock):
b = formatting.Black()
ctx = tooling.Ctx()
b.format(session, ["file1", "file2"], ctx)
session.run.assert_called_once_with("black", "file1", "file2", env={})
def test_isort_run(session: mock.MagicMock):
i = formatting.ISort(config="test.conf")
ctx = tooling.Ctx()
i.run(session, ctx)
session.run.assert_called_once_with(
"isort", "--settings-path", "test.conf", env={}
)
def test_isort_setup(session: mock.MagicMock):
i = formatting.ISort(deps=["dep1"])
i.setup(session)
session.install.assert_called_once_with("isort", "dep1")
def test_isort_check(session: mock.MagicMock):
i = formatting.ISort()
ctx = tooling.Ctx()
i.check(session, ["file1", "file2"], ctx)
session.run.assert_called_once_with(
"isort", "--check-only", "file1", "file2", env={}
)
def test_isort_format(session: mock.MagicMock):
i = formatting.ISort()
ctx = tooling.Ctx()
i.format(session, ["file1", "file2"], ctx)
session.run.assert_called_once_with("isort", "file1", "file2", env={})
```
#### File: nox-helpers/tests/test_tooling.py
```python
from unittest import mock
from nox_helpers import tooling
def test_ctx_add():
ctx = tooling.Ctx(
flags=["--flag1"], args=["arg1"], env={"env1": "val1"}, test1="test"
)
ctx += tooling.Ctx(
flags=["--flag2"], args=["arg2"], env={"env2": "val2"}, test2="test"
)
assert ctx.flags == ["--flag1", "--flag2"]
assert ctx.args == ["arg1", "arg2"]
assert ctx.env == {"env1": "val1", "env2": "val2"}
assert ctx.kwargs == {"test1": "test", "test2": "test"}
def test_tooling_run(session: mock.MagicMock):
t = tooling.Tool("tooling", flags=["--flag2"])
ctx = tooling.Ctx(
flags=["--flag1"], args=["arg1"], env={"env1": "val1"}, test1="test"
)
t.run(session, ctx)
session.run.assert_called_once_with(
"tooling",
ctx.flags[0],
"--flag2",
ctx.args[0],
env=ctx.env,
test1="test",
)
def test_tooling_setup(session: mock.MagicMock):
t = tooling.Tool("tooling", deps=["dep1", "dep2"])
t.setup(session, test="test1")
session.install.assert_called_once_with(
"tooling", "dep1", "dep2", test="test1"
)
def test_toolings_setup(session: mock.MagicMock):
t = [tooling.Tool("tooling1"), tooling.Tool("tooling2")]
ts = tooling.Tools(t)
ts.setup(session, test="test1")
session.install.assert_any_call("tooling1", test="test1")
session.install.assert_any_call("tooling2", test="test1")
assert session.install.call_count == 2
``` |
{
"source": "jmgilman/VaultSSH-Python",
"score": 3
} |
#### File: VaultSSH-Python/tests/test_common.py
```python
import os
import vaultssh.common as common
def test_get_signed_key_path():
test_path = "/home/user/.ssh/id_rsa.pub"
correct_path = "/home/user/.ssh/id_rsa-cert.pub"
assert common.get_signed_key_path(test_path) == correct_path
def test_get_token_file():
path = common.get_token_file()
assert path == os.path.join(os.path.expanduser("~"), ".vault-token")
def test_write_signed_key():
filename = "test.pub"
contents = "test"
common.write_signed_key(filename, contents)
with open(common.get_signed_key_path(filename), "r") as f:
assert f.read() == "test"
os.remove(common.get_signed_key_path(filename))
```
#### File: VaultSSH-Python/tests/test_main.py
```python
import os
from click.testing import CliRunner
from vaultssh.vaultssh import main
def test_main(mocker):
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(main)
assert 'Missing argument "SSH_PUBLIC_KEY"' in result.output
with open("test.pub", "w") as f:
f.write("Fake key data")
result = runner.invoke(main, ["test.pub"])
assert 'Missing argument "ROLE".' in result.output
os.environ["VAULT_ADDR"] = ""
result = runner.invoke(main, ["test.pub", "role"])
assert "No URL found" in result.output
os.environ["VAULT_ADDR"] = "htttp://foo.bar"
mocker.patch("vaultssh.auth.authenticate", return_value=None)
mocker.patch("hvac.Client.is_authenticated", return_value=True)
mocker.patch(
"hvac.Client.write", return_value={"data": {"signed_key": "test"}}
)
mocker.patch(
"vaultssh.common.get_signed_key_path", return_value="test.txt"
)
result = runner.invoke(main, ["test.pub", "role"])
assert "Signed key saved to test.txt" in result.output
with open("test.txt", "r") as f:
assert f.read() == "test"
assert result.exit_code == 0
```
#### File: VaultSSH-Python/vaultssh/vaultssh.py
```python
import getpass
import logging
import os
import click
import hvac
import vaultssh.auth as auth
import vaultssh.common as common
@click.command()
@click.option(
"--persist/--no-persist",
help="Whether to persist newly acquired tokens",
default=True,
)
@click.option(
"-s", "--server", help="The URL for the Vault server to query against"
)
@click.option("-t", "--token", help="The Vault token to authenticate with")
@click.option("-v", "--verbose", count=True)
@click.argument("ssh_public_key", type=click.File("r"))
@click.argument("role")
def main(ssh_public_key, role, persist, server, token, verbose):
""" Sign SSH_PUBLIC_KEY using the given Vault ROLE
\b
SSH_PUBLIC_KEY must be a file path to a valid SSH public key file
ROLE must be a valid configured role in the Vault server
"""
# Configure logging
common.configure_logging(verbose)
# Instantiate client
client = hvac.Client()
# Check for url
client.url = server if server else client.url
if not client.url:
logging.info("No url address to Vault server supplied")
click.echo(
"No URL found - please set VAULT_ADDR environment variable or manually pass a server url"
)
exit(1)
# Check for authentication
client.token = token if token else client.token
logging.debug(f"Token set to {client.token}")
logging.debug(f"URL set to {client.url}")
if not client.is_authenticated():
auth.authenticate(client, persist)
# Sign key
try:
result = client.write(
"ssh/sign/" + role, public_key=ssh_public_key.read()
)
except hvac.exceptions.InvalidRequest:
logging.fatal("Error signing SSH key", exc_info=True)
exit(1)
# Write the signed certificate
common.write_signed_key(ssh_public_key.name, result["data"]["signed_key"])
``` |
{
"source": "jmgilmer/mpnn",
"score": 3
} |
#### File: jmgilmer/mpnn/graph_util.py
```python
import tensorflow as tf
def feed_forward_nn(input_tensor,
num_hidden_layers,
output_dim,
keep_prob=None,
hidden_dim=-1,
activation="tanh",
normalizer="none"):
"""Creates a fully connected feed forward neural network.
Args:
input_tensor: shape [batch_size*num_nodes, input_dim], assumed to be
the final node states after the propagation step concatenated with the
initial nodes.
num_hidden_layers (int32): number of hidden layers in the network
set to 0 for a linear network.
output_dim (int32): dimension of the output of the network.
keep_prob (scalar tensor or float): Dropout keep prob.
hidden_dim (int32): size of the hidden layers
activation (string): tanh or relu
normalizer (string): layer or none
Returns:
tensor of shape [batch_size * num_nodes, output_dim]
note there is no non-linearity applied to the output.
Raises:
ValueError: If the given activation or normalizer is not supported.
"""
if activation == "tanh":
act = tf.tanh
elif activation == "relu":
act = tf.nn.relu
else:
raise ValueError("Invalid activation: {}".format(activation))
if normalizer == "layer":
norm = tf.contrib.layers.layer_norm
elif normalizer == "none":
norm = None
else:
raise ValueError("Invalid normalizer: {}".format(normalizer))
h_nn = input_tensor # first set of "hidden" units is the input
for i in range(num_hidden_layers):
with tf.name_scope("fully_connected/layer{}".format(i + 1)):
layer_dim = h_nn.get_shape()[1].value
w = tf.get_variable("W{}".format(i), shape=[layer_dim, hidden_dim])
b = tf.get_variable("b{}".format(i), shape=[hidden_dim])
h_nn = act(tf.matmul(h_nn, w) + b)
if norm is not None:
h_nn = norm(h_nn)
if keep_prob is not None:
h_nn = tf.nn.dropout(h_nn, keep_prob)
tf.summary.histogram("h_nn{}".format(i), h_nn)
layer_dim = h_nn.get_shape()[1].value
output_w = tf.get_variable("output_W", shape=[layer_dim, output_dim])
output_b = tf.get_variable("output_b", shape=[output_dim])
# final output has no non-linearity, this is applied outside this function
nn_output = tf.matmul(h_nn, output_w) + output_b
return nn_output
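# --- Usage sketch (not part of the original module) --------------------------
# The placeholder shape and the "readout" scope name below are illustrative
# assumptions. Because feed_forward_nn calls tf.get_variable directly, callers
# should wrap it in a tf.variable_scope so variable names stay unique.
#
# node_states = tf.placeholder(tf.float32, shape=[None, 128])
# with tf.variable_scope("readout"):
#     logits = feed_forward_nn(
#         node_states,
#         num_hidden_layers=2,
#         output_dim=1,
#         keep_prob=0.9,
#         hidden_dim=64,
#         activation="relu",
#         normalizer="layer")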
``` |
{
"source": "jmgindi/synth",
"score": 3
} |
#### File: synth/synth_part_builder/part_builder.py
```python
import os
from part_builder import PartBuilderException
class PartBuilder():
"""
Part builder class for use with building the compose file
and nginx router file
"""
allowed_frontends = ['static', 'dynamic', 'react', 'reactjs']
allowed_backends = ['node', 'flask', 'django']
allowed_databases = ['mongo', 'postgres', 'mysql', 'mariadb']
allowed_caches = ['redis', 'memcached']
def __init__(self, parts_root=None, project_name=None, front_enabled=False, back_enabled=False):
"""
Init method for class, sets important path information
"""
# do some checking on the config info passed
self.str_check(
parts_root, "root to parts directory must be of type string in PartBuilder init function")
self.str_check(
project_name, "default.conf file for NGINX router must be of type string in PartBuilder init function")
nginx_file = "{}/nginx_router/nginx_conf/default.conf".format(
project_name)
compose_file = "{}/docker-compose.yml".format(project_name)
if not os.path.isfile(nginx_file):
raise PartBuilderException(
"{} is not a file or does not exist".format(nginx_file))
if not os.path.isfile(compose_file):
raise PartBuilderException(
"{} is not a file or does not exist".format(compose_file))
# if all is good we got here without raising an exception, set instance to init info
self.parts_root = parts_root
self.project_name = project_name
self.nginx_file = nginx_file
self.compose_file = compose_file
self.allowed_master = []
self.allowed_master.extend(self.allowed_frontends)
self.allowed_master.extend(self.allowed_backends)
self.allowed_master.extend(self.allowed_databases)
self.allowed_master.extend(self.allowed_caches)
self.compose_router_update(
front_enabled=front_enabled, back_enabled=back_enabled)
@staticmethod
def str_check(param=None, err_msg="Path Error"):
"""
Checks that variables passed to PartBuilder functions
are not None and exist
Based on:
param: variable to check
err_msg: error message to output
"""
if param is None:
raise PartBuilderException(err_msg)
elif type(param) != str:
raise PartBuilderException(err_msg)
# ---> pipeline build section <---
def build_pipeline(self, name, pipeline, parts={}):
"""
builds the config file for the CI/CD pipeline selected by the user for the provided parts
Based on:
name: name of the project, used for tagging docker builds
pipeline: pipeline being used (travis or CircleCI)
parts: list of parts for project
"""
if (len(parts) == 0) or all(part is None for part in parts.values()):
raise PartBuilderException(
'PartBuilder cannot build CI/CD pipeline with no parts provided')
parts_path = "{}/pipeline/{}".format(self.parts_root, pipeline)
base_part_path = "{}/base.part".format(parts_path)
config_path = None
if pipeline == 'travis':
config_path = "{}/.travis.yml".format(self.project_name)
else:
raise PartBuilderException(
"Pipeline ({}) is not yet configured for synth!")
# build the base of the config file for chosen CI/CD framework
with open(base_part_path, 'r') as base_part:
part_data = base_part.readlines()
with open(config_path, 'w') as new_config:
new_config.writelines(part_data)
# build the rest of the config file
self.build_pipeline_section_pre_tests(
pipeline, parts, parts_path, config_path)
self.build_pipeline_section_tests(
pipeline, parts, parts_path, config_path)
self.build_pipeline_section_deploy(
pipeline, parts, parts_path, config_path)
with open(config_path, 'r') as cur_config:
conf_data = cur_config.readlines()
new_data = []
for line in conf_data:
line = line.format(self.project_name)
new_data.append(line)
with open(config_path, 'w') as new_config:
new_config.writelines(new_data)
# should not be used outside of class
def build_pipeline_section_pre_tests(self, pipeline, parts, parts_path, config_path):
"""
builds the before_install section to build containers for testing
Based on:
pipeline: pipeline being used (travis or CircleCI)
parts: list of parts to test
parts_path: path to pipeline parts location
config_path: path to config file for pipeline
e.g: .travis.yml
"""
# grab current config data from file so its not overwritten,
# more efficient than constantly writing to the file with append mode in loop
parts_path += "/pre_tests"
config_data = []
with open(config_path, 'r') as base_config:
config_data = base_config.readlines()
with open("{}/base.part".format(parts_path), 'r') as base_part:
part_data = base_part.readlines()
config_data.extend(part_data)
for part_name, part in parts.items():
# all parts passed by default, some None
# no need to deploy/test cache or database as image is just the official image, no customizations for prod
# also dynamic and static don't have default tests
if part is None or part_name == "cache" or part_name == "database" or part in ['dynamic', 'static']:
continue
# skip if file is missing (not set up)
# part_name is used here because not specific part files, just frontend or backend
if not os.path.isfile("{}/{}.part".format(parts_path, part_name)):
continue
# do the building for before_install
# again part_name is used due to directory name scheme
with open("{}/{}.part".format(parts_path, part_name), 'r') as file:
part_data = file.readlines()
config_data.extend(part_data)
with open(config_path, 'w') as new_config:
new_config.writelines(config_data)
# should not be used outside of class
def build_pipeline_section_tests(self, pipeline, parts, parts_path, config_path):
"""
builds the test section that runs the project's tests against the built containers
Based on:
pipeline: pipeline being used (travis or CircleCI)
parts: list of parts to test
parts_path: path to pipeline parts location
config_path: path to config file for pipeline
e.g: .travis.yml
"""
# grab current config data from file so its not overwritten,
# more efficient than constantly writing to the file with append mode in loop
parts_path += "/tests"
config_data = []
with open(config_path, 'r') as base_config:
config_data = base_config.readlines()
with open("{}/base.part".format(parts_path), 'r') as base_part:
part_data = base_part.readlines()
config_data.extend(part_data)
for part_name, part in parts.items():
# all parts passed by default, some None
# no need to deploy/test cache or database as image is just the official image, no customizations for prod
if part is None or part_name == "cache" or part_name == "database":
continue
# skip if file is missing (not set up)
if not os.path.isfile("{}/{}.part".format(parts_path, part)):
continue
# do the building for before_install
with open("{}/{}.part".format(parts_path, part), 'r') as file:
part_data = file.readlines()
config_data.extend(part_data)
with open(config_path, 'w') as new_config:
new_config.writelines(config_data)
# should not be used outside of class
# TODO implement
def build_pipeline_section_deploy(self, pipeline, parts, parts_path, config_path):
"""
builds the deploy section that builds and pushes the project's Docker images
Based on:
pipeline: pipeline being used (travis or CircleCI)
parts: list of parts to test
parts_path: path to pipeline parts location
config_path: path to config file for pipeline
e.g: .travis.yml
"""
# grab current config data from file so its not overwritten,
# more efficient than constantly writing to the file with append mode in loop
parts_path += "/deploy"
config_data = []
with open(config_path, 'r') as base_config:
config_data = base_config.readlines()
with open("{}/base.part".format(parts_path), 'r') as base_part:
part_data = base_part.readlines()
config_data.extend(part_data)
# add the build stage for the docker deploy
# add the nginx router, always included
# not included in parts dict because dict has no order
build_dir = parts_path + "/build"
with open("{}/router.part".format(build_dir), 'r') as router_part:
part_data = router_part.readlines()
config_data.extend(part_data)
for part_name, part in parts.items():
# all parts passed by default, some None
# no need to deploy/test cache or database as image is just the official image, no customizations for prod
if part is None or part_name == "cache" or part_name == "database":
continue
# skip if file is missing (not set up)
if not os.path.isfile("{}/{}.part".format(build_dir, part_name)):
continue
# add the build stage for the part
with open("{}/{}.part".format(build_dir, part_name), 'r') as file:
part_data = file.readlines()
config_data.extend(part_data)
# add the nginx router, always included
# not included in parts dict because dict has no order
push_dir = parts_path + "/push"
with open("{}/router.part".format(push_dir), 'r') as router_part:
part_data = router_part.readlines()
config_data.extend(part_data)
# add the push stage for docker deploy to hub
for part_name, part in parts.items():
# all parts passed by default, some None
# no need to deploy/test cache or database as image is just the official image, no customizations for prod
if part is None or part_name == "cache" or part_name == "database":
continue
# skip if file is missing (not set up)
if not os.path.isfile("{}/{}.part".format(push_dir, part_name)):
continue
with open("{}/{}.part".format(push_dir, part_name), 'r') as file:
part_data = file.readlines()
config_data.extend(part_data)
with open(config_path, 'w') as new_config:
new_config.writelines(config_data)
# ---> nginx and compose build section <---
def add_part(self, part=None, database=None, cache=None):
"""
adds a part to compose and nginx files based on a string passed
Based on:
part: string representing part to add,
e.g: static
"""
self.str_check(
part, "PartBuilder cannot add part of type {}".format(type(part)))
part = part.lower()
# append neccessary content for the part to the compose and nginx config files
if part in self.allowed_master:
# build the NGINX router default.conf
self.upstream_add(self.parts_root +
'/nginx/upstream/{}.part'.format(part), self.nginx_file)
self.location_add(self.parts_root +
'/nginx/location/{}.part'.format(part), self.nginx_file)
# build the docker-compose file
self.compose_add(self.parts_root +
'/compose/{}.part'.format(part), self.compose_file)
if part in self.allowed_backends and (database is not None or cache is not None):
self.backend_compose_update(database, cache)
else:
raise PartBuilderException(
"part provided to PartBuilder ({}) is not in allowed_master".format(part))
def compose_router_update(self, front_enabled=False, back_enabled=False):
"""
must be run before all other things dealing with compose building
due to relating with the router
"""
if not front_enabled and not back_enabled:
return
self.compose_add(
self.parts_root + '/compose/depends/base.part',
self.compose_file)
if front_enabled:
self.compose_add(
self.parts_root + '/compose/depends/frontend.part',
self.compose_file
)
if back_enabled:
self.compose_add(
self.parts_root + '/compose/depends/backend.part',
self.compose_file
)
def backend_compose_update(self, database, cache):
"""
updates the compose file after adding a part if its a backend by adding
neccessary environmental variables and depends_on sections
"""
if (database not in self.allowed_databases and cache not in self.allowed_caches):
raise PartBuilderException(
"backend_compose_update failed because database or cache not in allowed services"
)
self.compose_add(
self.parts_root + '/compose/depends/base.part',
self.compose_file)
if database in self.allowed_databases:
self.compose_add(
self.parts_root +
'/compose/depends/{}.part'.format(database),
self.compose_file
)
if cache in self.allowed_caches:
self.compose_add(
self.parts_root +
'/compose/depends/{}.part'.format(cache),
self.compose_file
)
# add environment variables for cache and database
self.compose_add(
self.parts_root + '/compose/env/base.part',
self.compose_file
)
if database in self.allowed_databases:
self.compose_add(
self.parts_root +
'/compose/env/{}.part'.format(database),
self.compose_file
)
if cache in self.allowed_caches:
self.compose_add(
self.parts_root +
'/compose/env/{}.part'.format(cache),
self.compose_file
)
def compose_add(self, part_path=None, config_path=None):
"""
Adds a part to the master docker-compose file
Based on:
part_path: path to the part to add
compose_path: path to master compose file to add to
"""
path_err = "Path to compose service part (part_path) must be of string type"
config_err = "Path to docker-compose file (config_path) must be of string type"
self.str_check(part_path, path_err)
self.str_check(config_path, config_err)
# all parts should have a compose portion
if os.path.isfile(part_path) is False:
raise PartBuilderException(
"{} is not a file or did not exist.".format(part_path))
# read the part file and config file text
with open(part_path, 'r') as part_file:
part_data = part_file.readlines()
with open(config_path, 'r') as file:
cur_config = file.readlines()
# add the part text onto the front of the config
cur_config.extend(part_data)
# write the new compose config file
with open(config_path, 'w') as new_config:
new_config.writelines(cur_config)
def upstream_add(self, part_path=None, config_path=None):
"""
Adds an upstream to the NGINX router default.conf file
ONLY needed for frontend and backend portions
Based on:
part_path: path to the part containing the upstream
config_path: path to the NGINX router default.conf file
"""
path_err = "Path to upstream part (part_path) must be of string type"
config_err = "Path to NGINX router file (config_path) must be of string type"
self.str_check(part_path, path_err)
self.str_check(config_path, config_err)
# skip if it's not present (not an error b/c database and cache are always not present)
if os.path.isfile(part_path) is False:
return None
# read the part file and config file text
with open(part_path, 'r') as part_file:
part_data = part_file.readlines()
with open(config_path, 'r') as file:
cur_config = file.readlines()
# add the part text onto the front of the config
part_data.extend(cur_config)
# write the new NGINX config file
with open(config_path, 'w') as new_config:
new_config.writelines(part_data)
def location_add(self, part_path=None, config_path=None):
"""
Adds a location block to the server block in the NGINX router
default.conf file
This is needed for routing requests to the upstream
Based on:
part_path: path to the part containing the location
config_path: path to the NGINX router default.conf file
"""
path_err = "Path to location part (part_path) must be of string type"
config_err = "Path to NGINX router file (config_path) must be of string type"
self.str_check(part_path, path_err)
self.str_check(config_path, config_err)
# skip if it's not present (not an error b/c database and cache are always not present)
if os.path.isfile(part_path) is False:
return None
# read the part data and the nginx config default data
with open(part_path, 'r') as part_file:
part_data = part_file.readlines()
with open(config_path, 'r') as file:
cur_config = file.readlines()
# delete the bracket at the end of the nginx config
del cur_config[-1]
# add the new content to the NGINX config at the end
# and readd the bracket
cur_config.extend(part_data)
cur_config.append('}')
# write the data to the new NGINX config file
with open(config_path, 'w') as new_config:
new_config.writelines(cur_config)
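# --- Usage sketch (not part of the original module) --------------------------
# PartBuilder assumes the scaffolded project (docker-compose.yml and
# nginx_router/nginx_conf/default.conf) already exists; the values below are
# illustrative placeholders.
#
# builder = PartBuilder(parts_root="parts", project_name="myproject",
#                       front_enabled=True, back_enabled=True)
# builder.add_part("react")
# builder.add_part("flask", database="postgres", cache="redis")
# builder.build_pipeline("myproject", "travis",
#                        parts={"frontend": "react", "backend": "flask",
#                               "database": "postgres", "cache": "redis"})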
``` |
{
"source": "jmgirven/warehouse",
"score": 3
} |
#### File: warehouse/application/urls.py
```python
from flask import render_template
from application import app
from application import parse
from application import views
## URL dispatch rules
# App Engine warm up handler
# See http://code.google.com/appengine/docs/python/config/appconfig.html#Warming_Requests
#app.add_url_rule('/_ah/warmup', 'warmup', view_func=views.warmup)
# Home page
app.add_url_rule('/', 'show_main', view_func=views.show_main)
# Parse pages
app.add_url_rule('/go', 'parse_sites', view_func=parse.parse_sites)
# Search
app.add_url_rule('/search/<searchStr>', 'search', view_func=views.search)
### Error handlers
## Handle 404 errors
#@app.errorhandler(404)
#def page_not_found(e):
# """Return a custom 404 error."""
# return views.page_not_found()
#
#
## Handle 500 errors
#@app.errorhandler(500)
#def page_exception(e):
# """Return a custom 500 error."""
# return views.page_exception()
```
#### File: warehouse/application/views.py
```python
from flask import make_response
from flask import render_template
from application import app
from application.model import Item
def show_main():
"""Show main page"""
itemsQuery = Item.query().order(-Item.created)
items = itemsQuery.fetch(100)
return table_of_results(items)
def search(searchStr):
"""Search catagory"""
itemsQuery = Item.query(Item.catagory==searchStr).order(-Item.created)
items = itemsQuery.fetch(100)
return table_of_results(items)
def table_of_results(items):
response = '<html><body><h2>Items</h2><ul>'
for item in items:
response += '<li><span>%s</span> --- <span>£%s</span></li>'\
% (item.title, item.price)
response += '</ul></body></html>'
return response
def page_not_found():
"""Show error page"""
response = make_response(
render_template('error.html'))
response.headers['Content-Type'] = 'text/html'
return response
def page_exception():
"""Show error page"""
response = make_response(
render_template('error.html'))
response.headers['Content-Type'] = 'text/html'
return response
if __name__ == '__main__':
app.run()
#
``` |
{
"source": "jm-glowienke/Breakthru",
"score": 3
} |
#### File: jm-glowienke/Breakthru/negamax.py
```python
from transition import Board
import random
import time
import tools
class NegaMax(object):
def __init__(self,board,player):
self.state = board
self.player = player
self.score = -9999999
self.time = time.time()
def get_opponent(self,player):
if player == 'gold':
return 'silver'
elif player == 'silver':
return 'gold'
else: raise Exception
def get_val(self,player,depth,alpha,beta):
# print(alpha,beta)
# run NegaMax algorithm
if depth == 0 or self.state.is_terminal() == True:
return self.utility(self.state,player),[]
self.score = -9999999
best_move = []
childNodes = self.state.get_all_moves(player)
childNodes = self.order_moves(childNodes)
childNodes = tools.remove_double_moves(childNodes)
for child in childNodes:
# if time.time() - self.time > 30:
# print("Search timed out!")
# return self.score, best_move
src = child[0]
dest = child[1]
dest_object = self.state.get_board()[dest[0]][dest[1]]
moves_left = self.state.make_simulated_move(player, src, dest, 2)
if moves_left == 0: # single move performed
value, best_sub_move = self.get_val(self.get_opponent(player),depth - 1,-beta,-alpha)
value = -value
self.state.undo_simulated_move(src,dest,dest_object)
if value > self.score: self.score = value
if self.score > alpha:
alpha = self.score
best_move = [[src,dest]]
best_move.append(best_sub_move)
if self.score >= beta:
break
elif moves_left == 1: # two moves performed
for child2 in child[3]:
src_2 = child2[0]
dest_2 = child2[1]
dest_object_2 = self.state.get_board()[dest_2[0]][dest_2[1]]
moves_left = self.state.make_simulated_move(player,src_2,dest_2,1)
value, best_sub_move = self.get_val(self.get_opponent(player),depth-1,-beta,-alpha)
value = -value
self.state.undo_simulated_move(src_2,dest_2,dest_object_2)
if value > self.score: self.score = value
if self.score > alpha:
alpha = self.score
best_move = [[src,dest]]
best_move.append([src_2,dest_2])
best_move.append(best_sub_move)
if self.score >= beta:
break
self.state.undo_simulated_move(src,dest,dest_object)
return self.score, best_move
def order_moves(self,moves):
moves.sort(key = lambda x: x[2])
return moves
def utility(self,state,player):
if player == 'gold':
if state.is_terminal() == True and state.get_winner() == 'GOLD':
return 30
elif state.is_terminal():
return -30
else:
opp = self.get_opponent(player)
flag_attack = 0
attack = 0
direct_access = 0
flag_covered = 0
positions = state.get_all_positions(player)
number_ships_left = len(positions)
silver_number_ships_left = state.get_number_pieces('silver')
for pos in positions:
if pos[0]+1 <= 10 and pos[1]-1>=0 \
and state.get_player_at_field([pos[0]+1,pos[1]-1]) == opp:
if state.get_board()[pos[0]][pos[1]] == 3:
flag_attack += 1
else:
attack += 1
if pos[0]-1 >= 0 and pos[1]-1 >= 0 \
and state.get_player_at_field([pos[0]-1,pos[1]-1]) == opp:
if state.get_board()[pos[0]][pos[1]] == 3:
flag_attack += 1
else:
attack += 1
if pos[0]-1 >= 0 and pos[1]+1 <= 10 \
and state.get_player_at_field([pos[0]-1,pos[1]+1]) == opp:
if state.get_board()[pos[0]][pos[1]] == 3:
flag_attack += 1
else:
attack += 1
if pos[0]+1 <= 10 and pos[1]+1 <= 10 \
and state.get_player_at_field([pos[0]+1,pos[1]+1]) == opp:
if state.get_board()[pos[0]][pos[1]] == 3:
flag_attack += 1
else:
attack += 1
if state.get_board()[pos[0]][pos[1]] == 3:
k = 1
while (pos[0] - k) >= 0 and state.get_player_at_field([pos[0]-k,pos[1]]) == 'empty':
if pos[0] - k == 0:
direct_access += 1
k += 1
k = 1
while (pos[0] + k) <= 10 and state.get_player_at_field([pos[0]+k,pos[1]]) == 'empty':
if pos[0] + k == 10:
direct_access += 1
k += 1
k = 1
while (pos[1] - k) >= 0 and state.get_player_at_field([pos[0],pos[1]-k]) == 'empty':
if pos[1] - k == 0:
direct_access += 1
k += 1
k = 1
while (pos[1] + k) <= 10 and state.get_player_at_field([pos[0],pos[1]+k]) == 'empty':
if pos[1] + k == 10:
direct_access += 1
k += 1
if pos[0]+1 <= 10 and pos[1]-1>=0 \
and state.get_player_at_field([pos[0]+1,pos[1]-1]) == player:
flag_covered += 1
if pos[0]-1 >= 0 and pos[1]-1 >= 0 \
and state.get_player_at_field([pos[0]-1,pos[1]-1]) == player:
flag_covered += 1
if pos[0]-1 >= 0 and pos[1]+1 <= 10 \
and state.get_player_at_field([pos[0]-1,pos[1]+1]) == player:
flag_covered += 1
if pos[0]+1 <= 10 and pos[1]+1 <= 10 \
and state.get_player_at_field([pos[0]+1,pos[1]+1]) == player:
flag_covered += 1
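# Heuristic weights (gold's perspective): keep own ships, avoid attacks on the
# flagship (flag_attack) and on escorts (attack), reward open straight lanes
# from the flagship to a board edge (direct_access) and friendly ships adjacent
# to the flagship (flag_covered), and penalise remaining silver material.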
utility = number_ships_left - 4*flag_attack - attack + 6 * direct_access + 3 * flag_covered - silver_number_ships_left
return utility
elif player == 'silver':
if state.is_terminal() == True and state.get_winner() == 'SILVER':
return 30
elif state.is_terminal():
return -30
else:
opp = self.get_opponent(player)
flag_attack = 0
attack = 0
direct_access = 0
flag_covered = 0
positions = state.get_all_positions(player)
number_ships_left = len(positions)
gold_number_ships_left = state.get_number_pieces('gold')
for pos in positions:
if pos[0]+1 <= 10 and pos[1]-1>=0 \
and state.get_player_at_field([pos[0]+1,pos[1]-1]) == opp:
if state.get_board()[pos[0]+1][pos[1]-1] == 3:
flag_attack += 1
else:
attack += 1
if pos[0]-1 >= 0 and pos[1]-1 >= 0 \
and state.get_player_at_field([pos[0]-1,pos[1]-1]) == opp:
if state.get_board()[pos[0]-1][pos[1]-1] == 3:
flag_attack += 1
else:
attack += 1
if pos[0]-1 >= 0 and pos[1]+1 <= 10 \
and state.get_player_at_field([pos[0]-1,pos[1]+1]) == opp:
if state.get_board()[pos[0]-1][pos[1]+1] == 3:
flag_attack += 1
else:
attack += 1
if pos[0]+1 <= 10 and pos[1]+1 <= 10 \
and state.get_player_at_field([pos[0]+1,pos[1]+1]) == opp:
if state.get_board()[pos[0]+1][pos[1]+1] == 3:
flag_attack += 1
else:
attack += 1
# Analyse flagship situation
k = 0
for row in state.get_board():
if 3 in row:
pos = [k,row.index(3)]
if state.get_board()[pos[0]][pos[1]] == 3:
k = 1
while (pos[0] - k) >= 0 and state.get_player_at_field([pos[0]-k,pos[1]]) == 'empty':
if pos[0] - k == 0:
direct_access += 1
k += 1
k = 1
while (pos[0] + k) <= 10 and state.get_player_at_field([pos[0]+k,pos[1]]) == 'empty':
if pos[0] + k == 10:
direct_access += 1
k += 1
k = 1
while (pos[1] - k) >= 0 and state.get_player_at_field([pos[0],pos[1]-k]) == 'empty':
if pos[1] - k == 0:
direct_access += 1
k += 1
k = 1
while (pos[1] + k) <= 10 and state.get_player_at_field([pos[0],pos[1]+k]) == 'empty':
if pos[1] + k == 10:
direct_access += 1
k += 1
if pos[0]+1 <= 10 and pos[1]-1>=0 \
and state.get_player_at_field([pos[0]+1,pos[1]-1]) == opp:
flag_covered += 1
if pos[0]-1 >= 0 and pos[1]-1 >= 0 \
and state.get_player_at_field([pos[0]-1,pos[1]-1]) == opp:
flag_covered += 1
if pos[0]-1 >= 0 and pos[1]+1 <= 10 \
and state.get_player_at_field([pos[0]-1,pos[1]+1]) == opp:
flag_covered += 1
if pos[0]+1 <= 10 and pos[1]+1 <= 10 \
and state.get_player_at_field([pos[0]+1,pos[1]+1]) == opp:
flag_covered += 1
else:
k += 1
utility = number_ships_left + 4*flag_attack + attack - 6 * direct_access - 3 * flag_covered - gold_number_ships_left
return utility
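# Minimal usage sketch (kept as a comment; it assumes transition.Board() builds the
# starting position, which is not shown here. NegaMax(board, player) and
# get_val(player, depth, alpha, beta) match the code above; the depth and the
# alpha/beta window are illustrative values only):
# board = Board()
# searcher = NegaMax(board, 'gold')
# score, line = searcher.get_val('gold', 2, -9999999, 9999999)
# print(score, line)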
``` |
{
"source": "jm-glowienke/fairseq",
"score": 2
} |
#### File: fairseq/models/transformer_xlm_iwslt_decoder.py
```python
import os
from typing import Any, Dict
from fairseq import checkpoint_utils
from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
TransformerModel,
base_architecture as transformer_base_architecture,
)
@register_model("transformer_xlm_iwslt_decoder")
class TransformerFromPretrainedXLMModel(TransformerModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
TransformerModel.add_args(parser)
parser.add_argument(
"--pretrained-xlm-checkpoint",
type=str,
metavar="STR",
help="XLM model to use for initializing transformer encoder "
"and/or decoder",
)
parser.add_argument(
"--init-encoder-only",
action="store_true",
help="if set, don't load the XLM weights and embeddings into "
"decoder",
)
parser.add_argument(
"--init-decoder-only",
action="store_true",
help="if set, don't load the XLM weights and embeddings into "
"encoder",
)
@classmethod
def build_model(cls, args, task, cls_dictionary=MaskedLMDictionary):
assert hasattr(args, "pretrained_xlm_checkpoint"), (
"You must specify a path for --pretrained-xlm-checkpoint to use "
"--arch transformer_from_pretrained_xlm"
)
assert isinstance(task.source_dictionary,
cls_dictionary) and isinstance(
task.target_dictionary, cls_dictionary
), (
"You should use a MaskedLMDictionary when using --arch "
"transformer_from_pretrained_xlm because the pretrained XLM model "
"was trained using data binarized with MaskedLMDictionary. "
"For translation, you may want to use --task "
"translation_from_pretrained_xlm"
)
assert not (
getattr(args, "init_encoder_only", False)
and getattr(args, "init_decoder_only", False)
), "Only one of --init-encoder-only and --init-decoder-only can be set."
return super().build_model(args, task)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoderFromPretrainedXLM(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(
args,
tgt_dict,
embed_tokens,
no_encoder_attn=getattr(args, "no_cross_attention", False),
)
def upgrade_state_dict_with_xlm_weights(
state_dict: Dict[str, Any], pretrained_xlm_checkpoint: str
) -> Dict[str, Any]:
"""
Load XLM weights into a Transformer encoder or decoder model.
Args:
state_dict: state dict for either TransformerEncoder or
TransformerDecoder
pretrained_xlm_checkpoint: checkpoint to load XLM weights from
Raises:
AssertionError: If architecture (num layers, attention heads, etc.)
does not match between the current Transformer encoder or
decoder and the pretrained_xlm_checkpoint
"""
if not os.path.exists(pretrained_xlm_checkpoint):
raise IOError(
"Model file not found: {}".format(pretrained_xlm_checkpoint))
state = checkpoint_utils.load_checkpoint_to_cpu(pretrained_xlm_checkpoint)
xlm_state_dict = state["model"]
for key in xlm_state_dict.keys():
for search_key in ["embed_tokens", "embed_positions", "layers"]:
if search_key in key:
subkey = key[key.find(search_key):]
if "in_proj_weight" in subkey or \
"in_proj_bias" in subkey:
continue
else:
assert subkey in state_dict, (
"{} \nTransformer encoder / decoder "
"state_dict does not contain {}. \nCannot "
"load {} from pretrained XLM checkpoint "
"{} into Transformer.".format(
str(state_dict.keys()), subkey, key,
pretrained_xlm_checkpoint
)
)
state_dict[subkey] = xlm_state_dict[key]
return state_dict
class TransformerEncoderFromPretrainedXLM(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
if getattr(args, "init_decoder_only", False):
# Don't load XLM weights for encoder if --init-decoder-only
return
assert hasattr(args, "pretrained_xlm_checkpoint"), (
"--pretrained-xlm-checkpoint must be specified to load Transformer "
"encoder from pretrained XLM"
)
if args.pretrained_xlm_checkpoint != 'interactive':
xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights(
state_dict=self.state_dict(),
pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint,
)
self.load_state_dict(xlm_loaded_state_dict, strict=True)
# class TransformerDecoderFromPretrainedXLM(TransformerDecoder):
# def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
# super().__init__(args, dictionary, embed_tokens, no_encoder_attn)
# if getattr(args, "init_encoder_only", False):
# # Don't load XLM weights for decoder if --init-encoder-only
# return
# assert hasattr(args, "pretrained_xlm_checkpoint"), (
# "--pretrained-xlm-checkpoint must be specified to load Transformer "
# "decoder from pretrained XLM"
# )
#
# xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights(
# state_dict=self.state_dict(),
# pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint,
# )
# self.load_state_dict(xlm_loaded_state_dict, strict=True)
@register_model_architecture(
"transformer_xlm_iwslt_decoder", "transformer_xlm_iwslt_decoder")
def transformer_xlm_iwslt_decoder(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 3072)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
transformer_base_architecture(args)
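# Example invocation (a sketch, not a verified command line: the data directory
# and checkpoint path are placeholders, and the task must supply
# MaskedLMDictionary-based dictionaries, e.g. --task translation_from_pretrained_xlm):
#   fairseq-train data-bin/iwslt14.de-en \
#       --arch transformer_xlm_iwslt_decoder \
#       --task translation_from_pretrained_xlm \
#       --pretrained-xlm-checkpoint /path/to/xlm_checkpoint.pt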
``` |
{
"source": "jmgonzmart/ensembl-analysis",
"score": 2
} |
#### File: scripts/genebuild/update_assembly_registry_sheet.py
```python
import mysql.connector
from mysql.connector import Error
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pprint
import unicodedata
import time
import datetime
import argparse
#import traceback
def fetch_db_data(query,database,host,port,user,password):
conn = None
cursor = None
rows = []
try:
conn = mysql.connector.connect(database=database,
host=host,
port=port,
user=user,
password=password)
cursor = conn.cursor()
cursor.execute(query)
rows = cursor.fetchall()
except Error as e:
print(e)
finally:
# Guard the clean-up so a failed connection does not raise NameError here
if cursor is not None:
cursor.close()
if conn is not None:
conn.close()
return rows
def update_assembly_sheet(assembly_db_data,meta_db_data,existing_sheet_records,assembly_sheet,gettime,worksheet_name):
# This method creates a dictionary for both the lists from the db and sheets and makes both
# dicts key on the versioned GCA (which is unique). Once the dicts are generated keys in the
# assembly db dict are compared to the keys in the sheets dict. If a key is not in the sheets
# dict then it is a new entry and gets made into a new row and added to the sheet. If the key
# is present then some tests are done to see if anything needs updating. Some of these tests
# could be made generic, but there are some complex cases like when the filters need updating
# The filters are basically tags for the assemblies that are then used to create the filter
# views in sheets
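# Illustrative example of the keying described above (the accession is only an example):
#   assembly_db_dict['GCA_000001405.28']    -> row fetched from the assembly registry db
#   existing_sheet_dict['GCA_000001405.28'] -> row read from the Google sheet
# Keys present only in assembly_db_dict become new sheet rows; shared keys are
# compared column by column and updated in place.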
min_contig_n50_filter = 100000
assembly_db_dict = {}
existing_sheet_dict = {}
max_version_dict = {}
existing_annotations_dict = {}
# This ordering needs to match the ordering of the query on the assembly db
assembly_db_columns = ['subspecies_name','common_name','chain','version','clade','contig_N50','assembly_level','assembly_date','refseq_accession','assembly_name','genome_rep','rnaseq_data', 'genebuilder','progress_status','assembly_group']
# This ordering needs to match the ordering of the columns on the sheet
assembly_sheet_columns = ['GCA','Clade','Species name','Common name','Contig N50','Assembly level','Assembly date','Assembly name','RNAseq data','RefSeq accession','Genebuilder','Status','Assembly group','Expected release','Grant','Notes','Filter: Max version','Filter: Genome rep','Filter: N50','Filter: Non-human']
# This makes a dict for the db on the versioned GCA and also makes a dict to track the highest
# version for a particular GCA (used in filtering later)
# Note the db has entries that are in unicode in some cases and need to be converted
for row in assembly_db_data:
chain = row[assembly_db_columns.index('chain')]
version = row[assembly_db_columns.index('version')]
chain.encode('ascii','ignore')
gca = make_gca(chain,version)
assembly_db_dict[gca] = row
if chain in max_version_dict:
current_max_version = max_version_dict[chain]
if version > current_max_version:
max_version_dict[chain] = version
else:
max_version_dict[chain] = version
# This makes an existing annotations dict based on the meta data db. Note that this db only
# goes back to e80, so there is a small chance that assemblies that were once annotated are not marked
# as handed over in the filters, but this shouldn't be a problem
for row in meta_db_data:
gca = row[0]
gca.encode('ascii','ignore')
existing_annotations_dict[gca] = 1
# This just makes a dict for the sheet based on the versioned GCA
for row in existing_sheet_records:
gca = row[0]
gca.encode('ascii','ignore')
if(gca == 'GCA'):
continue
else:
existing_sheet_dict[gca] = row
# This is where the majority of the work occurs. All assembly GCAs are examined to determined what
# should be added/updated
# Note that currently a three second sleep is needed to avoid exhausting the Sheets REST API quota
for gca in assembly_db_dict:
# Check that time since last authentication is < 1hr
# If greater than 1 hr, then re-authenticate
if(time.time() - gettime > 60 * 59):
print("Re-authenticating API's connection ")
# use creds to create a client to interact with the Google Drive API
scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name(credentials_path, scope)
client = gspread.authorize(creds)
gettime = time.time()
# Find a workbook by name and open the first sheet
# Make sure you use the right name here.
assembly_sheet = client.open(worksheet_name).worksheet("EnsemblAssemblyRegistry")
assembly_row = assembly_db_dict[gca]
species_name = assembly_row[assembly_db_columns.index('subspecies_name')]
common_name = assembly_row[assembly_db_columns.index('common_name')]
chain = assembly_row[assembly_db_columns.index('chain')]
chain.encode('ascii','ignore')
version = assembly_row[assembly_db_columns.index('version')]
clade = assembly_row[assembly_db_columns.index('clade')]
contig_N50 = assembly_row[assembly_db_columns.index('contig_N50')]
assembly_level = assembly_row[assembly_db_columns.index('assembly_level')]
assembly_date = assembly_row[assembly_db_columns.index('assembly_date')]
refseq_accession = assembly_row[assembly_db_columns.index('refseq_accession')]
assembly_name = assembly_row[assembly_db_columns.index('assembly_name')]
genome_rep = assembly_row[assembly_db_columns.index('genome_rep')]
rnaseq_data = assembly_row[assembly_db_columns.index('rnaseq_data')]
gca = make_gca(chain,version)
genebuilder = assembly_row[assembly_db_columns.index('genebuilder')]
annotation_status = assembly_row[assembly_db_columns.index('progress_status')]
assembly_group = assembly_row[assembly_db_columns.index('assembly_group')]
# If GCA is in meta db, then it means db has been handed over
if gca in existing_annotations_dict:
annotation_status = 'Handed over'
# If the row does not exist then add it in with the filtering info
if not gca in existing_sheet_dict:
# Depending on the assembly group, we match the display to the right project naming convention
# For example, Darwin Tree of Life (DToL), Vertebrates Genomes Project (VGP), etc.
# Ungrouped refers to non-project specific assemblies
if assembly_group == 'dtol':
assembly_group = 'DToL'
elif assembly_group == 'ungrouped':
assembly_group = assembly_group.capitalize()
else:
assembly_group = assembly_group.upper()
# When an assembly is first written to sheets, its status should be set to 'Not started' and genebuilder set to 'Not assigned'
annotation_status = 'Not started'
genebuilder = 'Not assigned'
new_row = [gca,clade,species_name,common_name,contig_N50,assembly_level,assembly_date.strftime('%Y-%m-%d'),assembly_name,rnaseq_data,refseq_accession,genebuilder,annotation_status,assembly_group,'','Not assigned','']
# This section sets various filters
# Setting filter of versioned GCA
if version == max_version_dict[chain]:
new_row.append(1)
else:
new_row.append(0)
# Setting filter for genome representation
if genome_rep == 'full':
new_row.append(1)
else:
new_row.append(0)
# Setting contig_N50 filter
if contig_N50 >= min_contig_n50_filter:
new_row.append(1)
else:
new_row.append(0)
# Set RNASeq status based on contig_N50 if not already assigned
if rnaseq_data is None:
if contig_N50 >= 100000:
new_row[8] = 'No RNAseq data'
else:
new_row[8] = 'Non candidate assembly'
else:
new_row[8] = rnaseq_data.capitalize()
# There is an issue with the db at the moment with trailing spaces on the species names, but this should get fixed
if not (species_name == "Homo sapiens " or species_name == "Homo sapiens"):
new_row.append(1)
else:
new_row.append(0)
# Add new record to sheets
print(new_row)
insert_index = 2
assembly_sheet.append_row(new_row)
time.sleep(3)
# If it does exist we need to check if an update is required. There are only a few columns this might pertain to
else:
sheet_row = existing_sheet_dict[gca]
sheet_clade_index = assembly_sheet_columns.index('Clade')
sheet_clade_val = sheet_row[sheet_clade_index]
sheet_filter_version_index = assembly_sheet_columns.index('Filter: Max version')
sheet_filter_N50_index = assembly_sheet_columns.index('Filter: N50')
sheet_filter_version_val = sheet_row[sheet_filter_version_index]
sheet_filter_N50_val = sheet_row[sheet_filter_N50_index]
sheet_refseq_accession_index = assembly_sheet_columns.index('RefSeq accession')
sheet_assembly_name_index = assembly_sheet_columns.index('Assembly name')
sheet_refseq_accession_val = sheet_row[sheet_refseq_accession_index]
sheet_assembly_name_val = sheet_row[sheet_assembly_name_index]
sheet_rnaseq_data_index = assembly_sheet_columns.index('RNAseq data')
sheet_rnaseq_data_val = sheet_row[sheet_rnaseq_data_index]
sheet_contig_N50_index = assembly_sheet_columns.index('Contig N50')
sheet_contig_N50_val = sheet_row[sheet_contig_N50_index]
sheet_annotation_status_index = assembly_sheet_columns.index('Status')
sheet_annotation_status_val = sheet_row[sheet_annotation_status_index]
sheet_genebuilder_index = assembly_sheet_columns.index('Genebuilder')
sheet_genebuilder_val = sheet_row[sheet_genebuilder_index]
sheet_assembly_group_index = assembly_sheet_columns.index('Assembly group')
sheet_assembly_group_val = sheet_row[sheet_assembly_group_index]
# Check if transcriptomic data status from db is null.
# If yes, check if assembly has been handed over or if assembly meets candidate assembly criteria
if ((rnaseq_data is None) and (sheet_rnaseq_data_val == 'Non candidate assembly' or sheet_rnaseq_data_val == 'No RNAseq data' or sheet_rnaseq_data_val == 'Not available')):
# Nothing to update
print("No update on rnaseq data status for: " + gca)
# It is possible to annotate a species and handover without RNASeq data
elif rnaseq_data is None and annotation_status == 'Handed over':
# update the RNASeq data status as not applicable
rnaseq_data = 'N/A'
print("Updating rnaseq data status for: " + gca)
row_update_index = assembly_sheet.find(gca).row
update_cell_val(assembly_sheet,row_update_index,sheet_rnaseq_data_index,rnaseq_data)
time.sleep(3)
elif rnaseq_data.lower() != sheet_rnaseq_data_val.lower():
rnaseq_data = rnaseq_data.capitalize()
# update the RNASeq data status with value from db
print("Updating rnaseq data status for: " + gca)
row_update_index = assembly_sheet.find(gca).row
update_cell_val(assembly_sheet,row_update_index,sheet_rnaseq_data_index,rnaseq_data)
time.sleep(3)
# Sometimes we could have assemblies with no contig_N50 value in the sheet. This can cause issues with comparison
if sheet_contig_N50_val is None:
# Set a default value for contig_N50
sheet_contig_N50_val = 0
if contig_N50 != int(sheet_contig_N50_val):
# Update the contig info on the sheet
print("Updating the contig for: " + gca)
row_update_index = assembly_sheet.find(gca).row
update_cell_val(assembly_sheet,row_update_index,sheet_contig_N50_index,contig_N50)
time.sleep(3)
# Compare clade values between db and sheets
if clade != sheet_clade_val:
# Update the clade
print("Updating the clade for: " + gca)
row_update_index = assembly_sheet.find(gca).row
update_cell_val(assembly_sheet,row_update_index,sheet_clade_index,clade)
time.sleep(3)
# Updating specific filters
if sheet_filter_version_val == "1" and str(version) != str(max_version_dict[chain]):
# update the max version to 0
print("Updating max version filter val for: " + gca)
row_update_index = assembly_sheet.find(gca).row
update_cell_val(assembly_sheet,row_update_index,sheet_filter_version_index,0)
time.sleep(3)
if contig_N50 >= min_contig_n50_filter and sheet_filter_N50_val == "0":
# update the N50 filter to 1
print("Updating contig_N50 filter val to 1 for: " + gca)
row_update_index = assembly_sheet.find(gca).row
update_cell_val(assembly_sheet,row_update_index,sheet_filter_N50_index,1)
time.sleep(3)
elif (contig_N50 < min_contig_n50_filter) and (sheet_filter_N50_val == "1"):
# update the N50 filter to 0
print("Updating contig_N50 filter val to 0 for: " + gca)
row_update_index = assembly_sheet.find(gca).row
update_cell_val(assembly_sheet,row_update_index,sheet_filter_N50_index,0)
time.sleep(3)
# Compare refseq accession where it exists
if not refseq_accession is None and refseq_accession != sheet_refseq_accession_val:
# Add/update the RefSeq accession
print("Updating RefSeq accession for: " + gca)
row_update_index = assembly_sheet.find(gca).row
update_cell_val(assembly_sheet,row_update_index,sheet_refseq_accession_index,refseq_accession)
time.sleep(3)
# Check if assembly name needs update
if not assembly_name is None and assembly_name != sheet_assembly_name_val:
# Add/update the assembly name
print("Updating Assembly name for: " + gca)
row_update_index = assembly_sheet.find(gca).row
update_cell_val(assembly_sheet,row_update_index,sheet_assembly_name_index,assembly_name)
time.sleep(3)
# Check status of the genebuild and update accordingly
if not annotation_status is None and annotation_status.lower() != sheet_annotation_status_val.lower():
# Add/update the annotation status
annotation_status = annotation_status.capitalize()
print("Updating genebuild status for: " + gca)
row_update_index = assembly_sheet.find(gca).row
update_cell_val(assembly_sheet,row_update_index,sheet_annotation_status_index,annotation_status)
time.sleep(3)
elif annotation_status is None:
annotation_status = 'Not started'
# Compare genebuilder information and update accordingly
if not genebuilder is None and genebuilder.lower() != sheet_genebuilder_val.lower():
# Add/update the genebuilder
print("Updating genebuilder for: " + gca)
row_update_index = assembly_sheet.find(gca).row
update_cell_val(assembly_sheet,row_update_index,sheet_genebuilder_index,genebuilder)
time.sleep(3)
elif genebuilder is None:
genebuilder = 'Not assigned'
# If the assembly group info between the sheets and db differs, update accordingly
if sheet_assembly_group_val is None:
print("This assembly has no group assigned on the sheet: " + gca)
if assembly_group == 'dtol':
assembly_group = 'DToL'
elif assembly_group == 'ungrouped':
assembly_group = assembly_group.capitalize()
else:
assembly_group = assembly_group.upper()
print("Updating assembly group info for: " + gca)
row_update_index = assembly_sheet.find(gca).row
update_cell_val(assembly_sheet,row_update_index,sheet_assembly_group_index,assembly_group)
time.sleep(3)
elif not assembly_group is None and assembly_group.lower() != sheet_assembly_group_val.lower():
if assembly_group == 'dtol':
assembly_group = 'DToL'
elif assembly_group == 'ungrouped':
assembly_group = assembly_group.capitalize()
else:
assembly_group = assembly_group.upper()
print("Updating assembly group info for: " + gca)
row_update_index = assembly_sheet.find(gca).row
update_cell_val(assembly_sheet,row_update_index,sheet_assembly_group_index,assembly_group)
time.sleep(3)
def make_gca(chain,version):
gca = chain + '.' + str(version)
return gca
def update_cell_val(assembly_sheet,row_index,col_offset,val):
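# gspread's update_cell() expects 1-based row/column indices, while the
# assembly_sheet_columns offsets used by the callers are 0-based, hence the +1.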
col_offset += 1
assembly_sheet.update_cell(row_index,col_offset,val)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-ad','--assembly_db_dbname', help='Name for assembly registry db', required=True)
parser.add_argument('-ah','--assembly_db_host', help='Host for assembly registry db', required=True)
parser.add_argument('-ap','--assembly_db_port', help='Port for assembly registry db', required=True)
parser.add_argument('-au','--assembly_db_user', help='User for assembly registry db', required=True)
parser.add_argument('-md','--meta_db_dbname', help='Name for meta data db', required=True)
parser.add_argument('-mh','--meta_db_host', help='Host for meta data db', required=True)
parser.add_argument('-mp','--meta_db_port', help='Port for meta data db', required=True)
parser.add_argument('-mu','--meta_db_user', help='User for meta data db', required=True)
parser.add_argument('-wsn','--worksheet_name', help='The name of the Google Sheets worksheet', required=True)
parser.add_argument('-gsc','--gsheets_credentials', help='Path to a Google Sheets credentials JSON file for authentication', required=True)
args = parser.parse_args()
assembly_db_query = 'SELECT subspecies_name,common_name,chain,version,clade,contig_N50,assembly_level,assembly_date,refseq_accession,assembly_name,genome_rep,rnaseq_data,genebuilder,progress_status,assembly_group FROM assembly JOIN meta USING(assembly_id) JOIN species_space_log using(species_id) LEFT JOIN genebuild_status using(assembly_id)'
assembly_db_database = args.assembly_db_dbname
assembly_db_host = args.assembly_db_host
assembly_db_port = args.assembly_db_port
assembly_db_user = args.assembly_db_user
assembly_db_password = ''
assembly_db_data = fetch_db_data(assembly_db_query,assembly_db_database,assembly_db_host,assembly_db_port,assembly_db_user,assembly_db_password)
meta_db_query = 'SELECT assembly_accession from assembly where assembly_accession like "GCA%"'
meta_db_database = args.meta_db_dbname
meta_db_host = args.meta_db_host
meta_db_port = args.meta_db_port
meta_db_user = args.meta_db_user
meta_db_password = ''
meta_db_data = fetch_db_data(meta_db_query,meta_db_database,meta_db_host,meta_db_port,meta_db_user,meta_db_password)
worksheet_name = args.worksheet_name
credentials_path = args.gsheets_credentials
# use creds to create a client to interact with the Google Drive API
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name(credentials_path, scope)
client = gspread.authorize(creds)
gettime = time.time()
# Find a workbook by name and open the first sheet
# Make sure you use the right name here.
assembly_sheet = client.open(worksheet_name).worksheet("EnsemblAssemblyRegistry")
# Extract and print all of the values
existing_sheet_records = assembly_sheet.get_all_values()
update_assembly_sheet(assembly_db_data,meta_db_data,existing_sheet_records,assembly_sheet,gettime,worksheet_name)
``` |
{
"source": "jmgo/pydicom-seg",
"score": 2
} |
#### File: pydicom-seg/tests/test_segmentation_dataset.py
```python
import tempfile
import numpy as np
import pydicom
import pytest
from pydicom_seg import __version__
from pydicom_seg.dicom_utils import DimensionOrganizationSequence
from pydicom_seg.segmentation_dataset import (
SegmentationDataset,
SegmentationFractionalType,
SegmentationType
)
class TestSegmentationDataset:
def setup(self):
self.dataset = SegmentationDataset(
rows=1,
columns=1,
segmentation_type=SegmentationType.BINARY
)
self.setup_dummy_segment(self.dataset)
def setup_dummy_segment(self, dataset: pydicom.Dataset):
ds = pydicom.Dataset()
ds.SegmentNumber = 1
dataset.SegmentSequence.append(ds)
def generate_dummy_source_image(self):
ds = pydicom.Dataset()
ds.SOPClassUID = '1.2.840.10008.5.1.4.1.1.2' # CT Image Storage
ds.SOPInstanceUID = pydicom.uid.generate_uid()
ds.SeriesInstanceUID = pydicom.uid.generate_uid()
return ds
def test_dataset_is_writable(self):
with tempfile.NamedTemporaryFile() as ofile:
self.dataset.save_as(ofile.name)
def test_dataset_has_valid_file_meta(self):
pydicom.dataset.validate_file_meta(self.dataset.file_meta)
def test_mandatory_sop_common(self):
assert self.dataset.SOPClassUID == '1.2.840.10008.5.1.4.1.1.66.4'
assert 'SOPInstanceUID' in self.dataset
def test_mandatory_enhanced_equipment_elements(self):
"""http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.5.2.html#table_C.7-8b"""
assert self.dataset.Manufacturer == 'pydicom-seg'
assert self.dataset.ManufacturerModelName == '[email protected]/razorx89/pydicom-seg.git'
assert self.dataset.DeviceSerialNumber == '0'
assert self.dataset.SoftwareVersions == __version__
def test_mandatory_frame_of_reference_elements(self):
"""http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.4.html#table_C.7-6"""
assert 'FrameOfReferenceUID' in self.dataset
def test_mandatory_general_series_elements(self):
"""http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.3.html#table_C.7-5a"""
assert self.dataset.Modality == 'SEG'
assert 'SeriesInstanceUID' in self.dataset
def test_mandatory_segmentation_series_elements(self):
"""http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.8.20.html#table_C.8.20-1"""
assert self.dataset.Modality == 'SEG'
assert self.dataset.SeriesNumber
def test_mandatory_image_pixel_elements(self):
"""http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.3.html#table_C.7-11a"""
assert self.dataset.SamplesPerPixel >= 1
assert self.dataset.PhotometricInterpretation in ['MONOCHROME1', 'MONOCHROME2']
assert 'Rows' in self.dataset
assert 'Columns' in self.dataset
assert self.dataset.BitsAllocated in [1, 8, 16]
assert 0 < self.dataset.BitsStored <= self.dataset.BitsAllocated
assert self.dataset.HighBit == self.dataset.BitsStored - 1
assert self.dataset.PixelRepresentation in [0, 1]
def test_mandatory_and_common_segmentation_image_elements(self):
"""http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.8.20.2.html#table_C.8.20-2"""
assert 'ImageType' in self.dataset
assert all([a == b for a, b in zip(self.dataset.ImageType, ['DERIVED', 'PRIMARY'])])
assert self.dataset.InstanceNumber
assert self.dataset.ContentLabel == 'SEGMENTATION'
assert 'ContentCreatorName' in self.dataset
assert 'ContentDescription' in self.dataset
assert self.dataset.SamplesPerPixel == 1
assert self.dataset.PhotometricInterpretation == 'MONOCHROME2'
assert self.dataset.PixelRepresentation == 0
assert self.dataset.LossyImageCompression == '00'
assert 'SegmentSequence' in self.dataset
def test_mandatory_binary_segmentation_image_elements(self):
"""http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.8.20.2.html#table_C.8.20-2"""
assert self.dataset.BitsAllocated == 1
assert self.dataset.BitsStored == 1
assert self.dataset.HighBit == 0
assert self.dataset.SegmentationType == 'BINARY'
@pytest.mark.parametrize('fractional_type', ['PROBABILITY', 'OCCUPANCY'])
def test_mandatory_fractional_segmentation_image_elements(self, fractional_type):
"""http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.8.20.2.html#table_C.8.20-2"""
dataset = SegmentationDataset(
rows=1,
columns=1,
segmentation_type=SegmentationType.FRACTIONAL,
segmentation_fractional_type=SegmentationFractionalType(fractional_type)
)
assert dataset.BitsAllocated == 8
assert dataset.BitsStored == 8
assert dataset.HighBit == 7 # Little Endian
assert dataset.SegmentationType == 'FRACTIONAL'
assert dataset.SegmentationFractionalType == fractional_type
assert dataset.MaximumFractionalValue == 255
def test_mandatory_multi_frame_functional_groups_elements(self):
"""http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.html#table_C.7.6.16-1"""
assert 'SharedFunctionalGroupsSequence' in self.dataset
assert len(self.dataset.SharedFunctionalGroupsSequence) == 1
assert 'PerFrameFunctionalGroupsSequence' in self.dataset
assert self.dataset.NumberOfFrames == 0
assert self.dataset.InstanceNumber
assert 'ContentDate' in self.dataset
assert 'ContentTime' in self.dataset
def test_timestamps_exist(self):
assert 'InstanceCreationDate' in self.dataset
assert 'InstanceCreationTime' in self.dataset
assert self.dataset.InstanceCreationDate == self.dataset.SeriesDate
assert self.dataset.InstanceCreationTime == self.dataset.SeriesTime
assert self.dataset.InstanceCreationDate == self.dataset.ContentDate
assert self.dataset.InstanceCreationTime == self.dataset.ContentTime
def test_exception_on_invalid_image_dimensions(self):
with pytest.raises(ValueError, match='.*must be larger than zero'):
SegmentationDataset(
rows=0,
columns=0,
segmentation_type=SegmentationType.BINARY
)
@pytest.mark.parametrize('max_fractional_value', [-1, 0, 256])
def test_exception_on_invalid_max_fractional_value(self, max_fractional_value):
with pytest.raises(ValueError, match='Invalid maximum fractional value.*'):
SegmentationDataset(
rows=1,
columns=1,
segmentation_type=SegmentationType.FRACTIONAL,
max_fractional_value=max_fractional_value,
)
def test_exception_when_adding_frame_with_wrong_rank(self):
with pytest.raises(ValueError, match='.*expecting 2D image'):
self.dataset.add_frame(np.zeros((1, 1, 1), dtype=np.uint8), 1)
def test_exception_when_adding_frame_with_wrong_shape(self):
with pytest.raises(ValueError, match='.*expecting \\d+x\\d+ images'):
self.dataset.add_frame(np.zeros((2, 1), dtype=np.uint8), 1)
@pytest.mark.parametrize('segmentation_type,dtype', [
(SegmentationType.BINARY, np.float32),
(SegmentationType.FRACTIONAL, np.uint8)
])
def test_exception_when_adding_frame_with_wrong_data_type(self, segmentation_type, dtype):
dataset = SegmentationDataset(
rows=1,
columns=1,
segmentation_type=segmentation_type
)
with pytest.raises(ValueError, match='.*requires.*?data type'):
dataset.add_frame(np.zeros((1, 1), dtype=dtype), 1)
def test_adding_frame_increases_number_of_frames(self):
old_count = self.dataset.NumberOfFrames
print(type(old_count))
self.dataset.add_frame(np.zeros((1, 1), dtype=np.uint8), 1)
assert self.dataset.NumberOfFrames == old_count + 1
def test_adding_binary_frame_modifies_pixel_data(self):
dataset = SegmentationDataset(
rows=2,
columns=2,
segmentation_type=SegmentationType.BINARY
)
self.setup_dummy_segment(dataset)
assert len(dataset.PixelData) == 0
dataset.add_frame(np.zeros((2, 2), dtype=np.uint8), 1)
assert len(dataset.PixelData) == 1
for _ in range(2):
dataset.add_frame(np.ones((2, 2), dtype=np.uint8), 1)
assert len(dataset.PixelData) == 2
def test_adding_fractional_frame_modifies_pixel_data(self):
dataset = SegmentationDataset(
rows=2,
columns=2,
segmentation_type=SegmentationType.FRACTIONAL
)
self.setup_dummy_segment(dataset)
assert len(dataset.PixelData) == 0
dataset.add_frame(np.zeros((2, 2), dtype=np.float32), 1)
assert len(dataset.PixelData) == 4
for _ in range(2):
dataset.add_frame(np.ones((2, 2), dtype=np.float32), 1)
assert len(dataset.PixelData) == 12
def test_adding_frame_with_reference_creates_referenced_series_sequence(self):
assert 'ReferencedSeriesSequence' not in self.dataset
dummy = self.generate_dummy_source_image()
self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1, [dummy])
assert 'ReferencedSeriesSequence' in self.dataset
series_sequence = self.dataset.ReferencedSeriesSequence
assert len(series_sequence) == 1
assert series_sequence[0].SeriesInstanceUID == dummy.SeriesInstanceUID
assert 'ReferencedInstanceSequence' in series_sequence[0]
instance_sequence = series_sequence[0].ReferencedInstanceSequence
assert len(instance_sequence) == 1
assert instance_sequence[0].ReferencedSOPClassUID == dummy.SOPClassUID
assert instance_sequence[0].ReferencedSOPInstanceUID == dummy.SOPInstanceUID
def test_adding_frames_with_different_references_from_same_series(self):
dummy1 = self.generate_dummy_source_image()
dummy2 = self.generate_dummy_source_image()
dummy2.SeriesInstanceUID = dummy1.SeriesInstanceUID
self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1, [dummy1])
self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1, [dummy2])
series_sequence = self.dataset.ReferencedSeriesSequence
assert len(series_sequence) == 1
assert series_sequence[0].SeriesInstanceUID == dummy1.SeriesInstanceUID
instance_sequence = series_sequence[0].ReferencedInstanceSequence
assert len(instance_sequence) == 2
assert instance_sequence[0].ReferencedSOPInstanceUID == dummy1.SOPInstanceUID
assert instance_sequence[1].ReferencedSOPInstanceUID == dummy2.SOPInstanceUID
def test_adding_frames_with_different_references_from_different_series(self):
dummies = [self.generate_dummy_source_image() for _ in range(2)]
self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1, [dummies[0]])
self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1, [dummies[1]])
series_sequence = self.dataset.ReferencedSeriesSequence
assert len(series_sequence) == 2
assert series_sequence[0].SeriesInstanceUID == dummies[0].SeriesInstanceUID
assert series_sequence[1].SeriesInstanceUID == dummies[1].SeriesInstanceUID
instance_sequence = series_sequence[0].ReferencedInstanceSequence
assert len(instance_sequence) == 1
assert instance_sequence[0].ReferencedSOPInstanceUID == dummies[0].SOPInstanceUID
instance_sequence = series_sequence[1].ReferencedInstanceSequence
assert len(instance_sequence) == 1
assert instance_sequence[0].ReferencedSOPInstanceUID == dummies[1].SOPInstanceUID
def test_adding_instance_reference_multiple_times(self):
dummy = self.generate_dummy_source_image()
item_added = self.dataset.add_instance_reference(dummy)
assert item_added
item_added = self.dataset.add_instance_reference(dummy)
assert not item_added
series_sequence = self.dataset.ReferencedSeriesSequence
assert len(series_sequence) == 1
assert series_sequence[0].SeriesInstanceUID == dummy.SeriesInstanceUID
assert len(series_sequence[0].ReferencedInstanceSequence) == 1
def test_adding_frame_increases_count_of_per_functional_groups_sequence(self):
assert len(self.dataset.PerFrameFunctionalGroupsSequence) == 0
self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1)
assert len(self.dataset.PerFrameFunctionalGroupsSequence) == 1
def test_adding_frame_with_reference_adds_source_image_sequence_to_per_frame_functional_group_item(self):
frame_item = self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1)
assert 'SourceImageSequence' not in frame_item
dummy = self.generate_dummy_source_image()
frame_item = self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1, [dummy])
assert 'SourceImageSequence' in frame_item
assert len(frame_item.SourceImageSequence) == 1
def test_adding_frame_adds_referenced_segment_to_per_frame_functional_group_item(self):
frame_item = self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1)
assert 'SegmentIdentificationSequence' in frame_item
assert len(frame_item.SegmentIdentificationSequence) == 1
segment_id_item = frame_item.SegmentIdentificationSequence[0]
assert 'ReferencedSegmentNumber' in segment_id_item
assert segment_id_item.ReferencedSegmentNumber == 1
def test_exception_on_adding_frame_with_non_existing_segment(self):
with pytest.raises(IndexError, match='Segment not found.*'):
self.dataset.add_frame(np.zeros((1, 1), np.uint8), 2)
def test_add_dimension_organization(self):
assert 'DimensionOrganizationSequence' not in self.dataset
assert 'DimensionIndexSequence' not in self.dataset
seq = DimensionOrganizationSequence()
seq.add_dimension('ReferencedSegmentNumber', 'SegmentIdentificationSequence')
seq.add_dimension('ImagePositionPatient', 'PlanePositionSequence')
self.dataset.add_dimension_organization(seq)
assert len(self.dataset.DimensionOrganizationSequence) == 1
assert len(self.dataset.DimensionIndexSequence) == 2
assert self.dataset.DimensionIndexSequence[0].DimensionDescriptionLabel == 'ReferencedSegmentNumber'
assert self.dataset.DimensionIndexSequence[1].DimensionDescriptionLabel == 'ImagePositionPatient'
def test_add_dimension_organization_duplicate(self):
seq = DimensionOrganizationSequence()
seq.add_dimension('ReferencedSegmentNumber', 'SegmentIdentificationSequence')
seq.add_dimension('ImagePositionPatient', 'PlanePositionSequence')
self.dataset.add_dimension_organization(seq)
with pytest.raises(ValueError, match='Dimension organization with UID.*'):
self.dataset.add_dimension_organization(seq)
def test_add_multiple_dimension_organizations(self):
for _ in range(2):
seq = DimensionOrganizationSequence()
seq.add_dimension('ReferencedSegmentNumber', 'SegmentIdentificationSequence')
seq.add_dimension('ImagePositionPatient', 'PlanePositionSequence')
self.dataset.add_dimension_organization(seq)
assert len(self.dataset.DimensionOrganizationSequence) == 2
assert len(self.dataset.DimensionIndexSequence) == 4
```
#### File: pydicom-seg/tests/test_writer.py
```python
import os
import numpy as np
import pydicom
import pytest
import SimpleITK as sitk
from pydicom_seg import MultiClassWriter
from pydicom_seg.template import from_dcmqi_metainfo
class TestMultiClassWriter:
def setup(self):
self.template = from_dcmqi_metainfo(os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'pydicom_seg',
'externals',
'dcmqi',
'doc',
'examples',
'seg-example_multiple_segments.json'
))
@pytest.mark.parametrize('dtype', [np.int8, np.float32])
def test_raises_on_invalid_data_type(self, dtype):
data = np.zeros((1, 1, 1), dtype=dtype)
segmentation = sitk.GetImageFromArray(data)
writer = MultiClassWriter(self.template)
with pytest.raises(ValueError, match='Unsigned integer data type.*'):
writer.write(segmentation, [])
def test_raises_on_invalid_rank(self):
data = np.zeros((1, 1), dtype=np.uint8)
segmentation = sitk.GetImageFromArray(data)
writer = MultiClassWriter(self.template)
with pytest.raises(ValueError, match='.*3D.*'):
writer = MultiClassWriter(self.template)
writer.write(segmentation, [])
def test_raises_on_invalid_component_count(self):
data = np.zeros((1, 1, 1, 2), dtype=np.uint8)
segmentation = sitk.GetImageFromArray(data, isVector=True)
writer = MultiClassWriter(self.template)
with pytest.raises(ValueError, match='.*single component per voxel'):
writer.write(segmentation, [])
def test_raises_on_empty_segmentation(self):
data = np.zeros((1, 1, 1), dtype=np.uint8)
segmentation = sitk.GetImageFromArray(data)
writer = MultiClassWriter(self.template)
with pytest.raises(ValueError, match='.*not contain any labels'):
writer.write(segmentation, [])
def test_raises_on_missing_segment_declaration(self):
data = np.full((1, 1, 1), fill_value=4, dtype=np.uint8)
segmentation = sitk.GetImageFromArray(data)
writer = MultiClassWriter(self.template, skip_missing_segment=False)
with pytest.raises(ValueError, match='.*declaration is missing.*'):
writer.write(segmentation, [])
def test_raises_on_empty_segmentation_after_skipped_missing_segment_declarations(self):
data = np.full((1, 1, 1), fill_value=4, dtype=np.uint8)
segmentation = sitk.GetImageFromArray(data)
writer = MultiClassWriter(self.template, skip_missing_segment=True)
with pytest.raises(ValueError, match='No segments found.*'):
writer.write(segmentation, [])
def test_full_slice_encoding(self):
data = np.ones((1, 512, 512), dtype=np.uint8)
segmentation = sitk.GetImageFromArray(data)
writer = MultiClassWriter(self.template)
ds = writer.write(segmentation, [])
assert ds.NumberOfFrames == 1
assert ds.Rows == 512
assert ds.Columns == 512
def test_shared_functional_groups_encoding(self):
data = np.ones((1, 512, 512), dtype=np.uint8)
segmentation = sitk.GetImageFromArray(data)
segmentation.SetSpacing((0.8, 0.8, 5.0))
segmentation.SetDirection((1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0))
writer = MultiClassWriter(self.template)
ds = writer.write(segmentation, [])
sfg = ds.SharedFunctionalGroupsSequence[0]
print(sfg)
assert sfg.PixelMeasuresSequence[0].PixelSpacing[0] == 0.8
assert sfg.PixelMeasuresSequence[0].PixelSpacing[1] == 0.8
assert sfg.PixelMeasuresSequence[0].SliceThickness == 5.0
assert sfg.PixelMeasuresSequence[0].SpacingBetweenSlices == 5.0
assert all([
str(x) == y
for x, y in zip(
sfg.PlaneOrientationSequence[0].ImageOrientationPatient,
['1.000000e+00', '0.000000e+00', '0.000000e+00',
'0.000000e+00', '-1.000000e+00', '0.000000e+00']
)
])
def test_slice_encoding_with_cropping(self):
data = np.zeros((1, 512, 512), dtype=np.uint8)
data[0, 128:-128, 64:-64] = 1
segmentation = sitk.GetImageFromArray(data)
writer = MultiClassWriter(self.template, inplane_cropping=True)
ds = writer.write(segmentation, [])
assert ds.NumberOfFrames == 1
assert ds.Rows == 256
assert ds.Columns == 384
def test_slice_encoding_without_cropping(self):
data = np.zeros((1, 512, 512), dtype=np.uint8)
data[0, 128:-128, 64:-64] = 1
segmentation = sitk.GetImageFromArray(data)
writer = MultiClassWriter(self.template, inplane_cropping=False)
ds = writer.write(segmentation, [])
assert ds.NumberOfFrames == 1
assert ds.Rows == 512
assert ds.Columns == 512
def test_multi_class_encoding(self):
data = np.ones((1, 512, 512), dtype=np.uint8)
data[0, 128:-128, 128:-128] = 2
segmentation = sitk.GetImageFromArray(data)
writer = MultiClassWriter(self.template)
ds = writer.write(segmentation, [])
assert ds.Rows == 512
assert ds.Columns == 512
assert ds.NumberOfFrames == 2
assert ds.PerFrameFunctionalGroupsSequence[0].SegmentIdentificationSequence[0].ReferencedSegmentNumber == 1
assert ds.PerFrameFunctionalGroupsSequence[1].SegmentIdentificationSequence[0].ReferencedSegmentNumber == 2
def test_multi_class_slice_encoding_with_cropping(self):
data = np.zeros((1, 512, 512), dtype=np.uint8)
data[0, 64:128, 64:128] = 1
data[0, -128:-64, -128:-64] = 2
segmentation = sitk.GetImageFromArray(data)
writer = MultiClassWriter(self.template, inplane_cropping=True)
ds = writer.write(segmentation, [])
assert ds.NumberOfFrames == 2
assert ds.Rows == 384
assert ds.Columns == 384
def test_skip_empty_slices_multi_class(self):
data = np.zeros((2, 512, 512), dtype=np.uint8)
data[0, 64:128, 64:128] = 1
data[1, -128:-64, -128:-64] = 2
segmentation = sitk.GetImageFromArray(data)
writer = MultiClassWriter(
self.template,
inplane_cropping=True,
skip_empty_slices=True
)
ds = writer.write(segmentation, [])
assert ds.NumberOfFrames == 2
assert ds.Rows == 384
assert ds.Columns == 384
def test_noskip_empty_slices_multi_class(self):
data = np.zeros((2, 512, 512), dtype=np.uint8)
data[0, 64:128, 64:128] = 1
data[1, -128:-64, -128:-64] = 2
segmentation = sitk.GetImageFromArray(data)
writer = MultiClassWriter(
self.template,
inplane_cropping=True,
skip_empty_slices=False
)
ds = writer.write(segmentation, [])
assert ds.NumberOfFrames == 4
assert ds.Rows == 384
assert ds.Columns == 384
assert ds.pixel_array[0].any() # slice=0, segment=1
assert not ds.pixel_array[1].any() # slice=1, segment=1, only zeros
assert not ds.pixel_array[2].any() # slice=0, segment=2, only zeros
assert ds.pixel_array[3].any() # slice=1, segment=2
def test_skip_empty_slices_between_filled_slices(self):
data = np.zeros((3, 512, 512), dtype=np.uint8)
data[0, 64:128, 64:128] = 1
data[2, -128:-64, -128:-64] = 1
segmentation = sitk.GetImageFromArray(data)
writer = MultiClassWriter(
self.template,
inplane_cropping=True,
skip_empty_slices=True
)
ds = writer.write(segmentation, [])
assert ds.NumberOfFrames == 2
assert ds.Rows == 384
assert ds.Columns == 384
def test_missing_segment(self):
data = np.zeros((3, 512, 512), dtype=np.uint8)
data[0, 64:128, 64:128] = 1
data[2, -128:-64, -128:-64] = 4
segmentation = sitk.GetImageFromArray(data)
writer = MultiClassWriter(
self.template,
skip_missing_segment=True
)
ds = writer.write(segmentation, [])
assert ds.NumberOfFrames == 1
assert len(ds.SegmentSequence) == 1
``` |
{
"source": "jmgpeeters/fastavro",
"score": 2
} |
#### File: fastavro/tests/test_validation.py
```python
from fastavro.validation import (
ValidationError,
ValidationErrorData,
validate,
validate_many
)
import pytest
import numpy as np
import sys
# In PY2 when you do type(int) you get <type 'type'> but in PY3 you get
# <class 'type'>
if sys.version_info >= (3, 0):
type_type = 'class'
else:
type_type = 'type'
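# e.g. repr(type(1)) is "<class 'int'>" on Python 3 and "<type 'int'>" on Python 2,
# which is why the error-message assertions below interpolate type_type.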
schema = {
"fields": [
{
"name": "str_null",
"type": ["null", "string"]
},
{
"name": "str",
"type": "string"
},
{
"name": "integ_null",
"type": ["null", "int"]
},
{
"name": "integ",
"type": "int"
}
],
"namespace": "namespace",
"name": "missingerror",
"type": "record"
}
# TODO: Add more tests for all types and combinations
def validation_boolean(schema, *records):
return validate_many(records, schema, raise_errors=False)
def validation_raise(schema, *records):
return validate_many(records, schema, raise_errors=True)
def test_validate_string_in_int_raises():
records = [{
'str_null': 'str',
'str': 'str',
'integ_null': 'str',
'integ': 21,
}]
with pytest.raises(ValidationError) as exc:
validation_raise(schema, *records)
for error in exc.value.errors:
expected_type = error.schema
assert expected_type in ['null', 'int']
assert error.field == 'namespace.missingerror.integ_null'
def test_validate_string_in_int_false():
records = [{
'str_null': 'str',
'str': 'str',
'integ_null': 'str',
'integ': 21,
}]
assert validation_boolean(schema, *records) is False
def test_validate_true():
records = [
{'str_null': 'str', 'str': 'str', 'integ_null': 21, 'integ': 21, },
{'str_null': None, 'str': 'str', 'integ_null': None, 'integ': 21, },
]
assert validation_boolean(schema, *records) is True
validation_raise(schema, *records)
def test_validate_string_in_int_null_raises():
records = [{
'str_null': 'str',
'str': 'str',
'integ_null': 11,
'integ': 'str',
}]
with pytest.raises(ValidationError) as exc:
validation_raise(schema, *records)
for error in exc.value.errors:
expected_type = error.schema
assert expected_type == 'int'
assert error.field == 'namespace.missingerror.integ'
def test_validate_string_in_int_null_false():
records = [{
'str_null': 'str',
'str': 'str',
'integ_null': 11,
'integ': 'str',
}]
assert validation_boolean(schema, *records) is False
def test_validate_int_in_string_null_raises():
records = [{
'str_null': 11,
'str': 'str',
'integ_null': 21,
'integ': 21,
}]
with pytest.raises(ValidationError) as exc:
validation_raise(schema, *records)
for error in exc.value.errors:
expected_type = error.schema
assert expected_type in ['string', 'null']
assert error.field == 'namespace.missingerror.str_null'
def test_validate_int_in_string_null_false():
records = [{
'str_null': 11,
'str': 'str',
'integ_null': 21,
'integ': 21,
}]
assert validation_boolean(schema, *records) is False
def test_validate_int_in_string_raises():
records = [{
'str_null': 'str',
'str': 11,
'integ_null': 21,
'integ': 21,
}]
with pytest.raises(ValidationError) as exc:
validation_raise(schema, *records)
for error in exc.value.errors:
expected_type = error.schema
assert expected_type == 'string'
assert error.field == 'namespace.missingerror.str'
def test_validate_int_in_string_false():
records = [{
'str_null': 'str',
'str': 11,
'integ_null': 21,
'integ': 21,
}]
assert validation_boolean(schema, *records) is False
def test_validate_null_in_string_raises():
records = [{
'str_null': 'str',
'str': None,
'integ_null': 21,
'integ': 21,
}]
with pytest.raises((ValidationError,)):
validation_raise(schema, *records)
def test_validate_null_in_string_false():
records = [{
'str_null': 'str',
'str': None,
'integ_null': 21,
'integ': 21,
}]
assert validation_boolean(schema, *records) is False
def test_validate_unicode_in_string_does_not_raise():
"""https://github.com/fastavro/fastavro/issues/269"""
non_ascii = u'日本語'
records = [{
'str_null': non_ascii,
'str': 'str',
'integ_null': 21,
'integ': 21,
}]
validation_raise(schema, *records)
records = [{
'str_null': 'str',
'str': 'str',
'integ_null': 21,
'integ': non_ascii,
}]
with pytest.raises(ValidationError) as exc:
validation_raise(schema, *records)
for error in exc.value.errors:
assert error.datum == non_ascii
def test_validate_error_raises():
with pytest.raises(ValidationError):
raise ValidationError()
error = ValidationErrorData(10, "string", "test1")
msg = "test1 is <10> of type <{} 'int'> expected string".format(type_type)
assert msg in str(error)
def test_validate_error_none_field():
error = ValidationErrorData(10, "string", None)
msg = " is <10> of type <{} 'int'> expected string".format(type_type)
assert msg in str(error)
def test_validator_numeric():
for datum, schema in [
(1, 'int'),
(1, 'long'),
(1.0, 'float'),
(1.0, 'double'),
(1, 'float'),
(1, 'double'),
]:
validate(datum, schema)
for datum, schema in [
(1.0, 'int'),
(1.0, 'long'),
("1.0", 'float'),
("1.0", 'double'),
("1", 'float'),
("1", 'double'),
(True, 'int'),
(True, 'long'),
(True, 'float'),
(True, 'double'),
(False, 'int'),
(False, 'long'),
(False, 'float'),
(False, 'double'),
]:
with pytest.raises(ValidationError):
validate(datum, schema)
pytest.fail("{} should not validate as {}".format(datum, schema))
def test_validator_logical():
"""https://github.com/fastavro/fastavro/issues/365"""
for datum, schema in [
(1, {"type": "long", "logicalType": "timestamp-micros"}),
]:
validate(datum, schema)
for datum, schema in [
("foo", {"type": "long", "logicalType": "timestamp-micros"}),
]:
with pytest.raises(ValidationError):
validate(datum, schema)
pytest.fail("{} should not validate as {}".format(datum, schema))
def test_validate_array():
my_schema = {
"fields": [
{
"name": "array",
"type": {
"type": "array",
"items": "string",
},
},
],
"namespace": "namespace",
"name": "test_validate_array",
"type": "record"
}
datum = {"array": [1]}
with pytest.raises(ValidationError) as exc:
validate(datum, my_schema)
for error in exc.value.errors:
assert error.field == 'namespace.test_validate_array.array'
def test_validate_map():
my_schema = {
"fields": [
{
"name": "map",
"type": {
"type": "map",
"values": "string",
},
},
],
"namespace": "namespace",
"name": "test_validate_map",
"type": "record"
}
datum = {"map": {"key": 1}}
with pytest.raises(ValidationError) as exc:
validate(datum, my_schema)
for error in exc.value.errors:
assert error.field == 'namespace.test_validate_map.map'
def test_validator_numeric_numpy():
np_ints = [
np.int_,
np.intc,
np.intp,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
]
np_floats = [
np.float_,
np.float16,
np.float32,
np.float64,
]
schema_ints = ['int', 'long']
schema_floats = ['float', 'double']
# all these should work
for nptype, schema in zip(np_ints, schema_ints):
validate(nptype(1), schema)
for nptype, schema in zip(np_ints, schema_floats):
validate(nptype(1), schema)
for nptype, schema in zip(np_floats, schema_floats):
validate(nptype(1), schema)
# these shouldn't work
for nptype, schema in zip(np_floats, schema_ints):
with pytest.raises(ValidationError):
validate(nptype(1), schema)
``` |
{
"source": "jmgrady/TheCombine",
"score": 2
} |
#### File: deploy/scripts/combine_charts.py
```python
import argparse
from pathlib import Path
from jinja2 import Environment, PackageLoader, select_autoescape
helm_dir = Path(__file__).resolve().parent.parent / "helm"
# Map the chart names to their location. This is useful for updating
# dependencies (in Chart.yaml) as well as the charts.
helm_charts = [
helm_dir / "aws-login",
helm_dir / "thecombine",
helm_dir / "thecombine" / "charts" / "backend",
helm_dir / "thecombine" / "charts" / "database",
helm_dir / "thecombine" / "charts" / "frontend",
helm_dir / "thecombine" / "charts" / "maintenance",
helm_dir / "cert-proxy-client",
helm_dir / "cert-proxy-server",
helm_dir / "create-admin-user",
]
def parse_args() -> argparse.Namespace:
"""Define command line arguments for parser."""
# Parse user command line arguments
parser = argparse.ArgumentParser(
description="Update the version and appVersions for the Helm charts.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"version",
help="New version for the Helm charts.",
)
return parser.parse_args()
def generate(version: str) -> None:
print("New version: {version}")
version_config = {
"version": {
"aws_login": "v0.2.0",
"thecombine": f"v{version}",
"cert_proxy_client": f"v{version}",
"cert_proxy_server": f"v{version}",
"create_admin_user": f"v{version}",
}
}
for chart_dir in helm_charts:
# Initialize the Jinja2 environment
jinja_env = Environment(
loader=PackageLoader("combine_charts", str(chart_dir)),
autoescape=select_autoescape(["html", "xml"]),
trim_blocks=False,
lstrip_blocks=True,
)
template = jinja_env.get_template("Chart.yaml.j2")
final_chart = chart_dir / "Chart.yaml"
print(f"Writing: {final_chart}")
final_chart.write_text(template.render(version_config))
def main() -> None:
args = parse_args()
generate(args.version)
if __name__ == "__main__":
main()
```
#### File: maintenance/scripts/combine_restore.py
```python
import argparse
import logging
import os
from pathlib import Path
import re
import sys
import tarfile
import tempfile
from typing import List, Tuple
from aws_backup import AwsBackup
from combine_app import CombineApp
import humanfriendly
from script_step import ScriptStep
def parse_args() -> argparse.Namespace:
"""Define command line arguments for parser."""
parser = argparse.ArgumentParser(
description="Restore TheCombine database and backend files from a file in AWS S3.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--verbose", action="store_true", help="Print intermediate values to aid in debugging"
)
parser.add_argument(
"--clean", action="store_true", help="Clean out Backend files before restoring from backup"
)
parser.add_argument("--file", help="name of file in AWS S3 to be restored.")
return parser.parse_args()
def aws_strip_bucket(obj_name: str) -> str:
"""Strip the bucket name from the beginning of the supplied object name."""
match = re.match(r"^[^/]+/(.*)", obj_name)
if match is not None:
return match.group(1)
return obj_name
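# Illustrative behaviour (examples are not from the original source):
#   aws_strip_bucket("my-bucket/backups/combine-backup.tar.gz") -> "backups/combine-backup.tar.gz"
#   aws_strip_bucket("combine-backup.tar.gz")                   -> "combine-backup.tar.gz"  (no bucket prefix)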
def main() -> None:
"""Restore TheCombine from a backup stored in the AWS S3 service."""
args = parse_args()
if args.verbose:
logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)
else:
logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.WARNING)
combine = CombineApp()
aws = AwsBackup(bucket=os.environ["aws_bucket"])
step = ScriptStep()
step.print("Prepare for the restore.")
with tempfile.TemporaryDirectory() as restore_dir:
restore_file = "combine-backup.tar.gz"
if args.file:
backup = args.file
else:
# Get the list of backups
backup_list_output = aws.list().stdout.strip().split("\n")
if len(backup_list_output) == 0:
print(f"No backups available from {os.environ['aws_bucket']}")
sys.exit(0)
# Convert the list of backups to a more useful structure
aws_backup_list: List[Tuple[str, str]] = []
for backup_row in backup_list_output:
backup_components = backup_row.split()
aws_backup_list.append(
(
humanfriendly.format_size(int(backup_components[2])),
aws_strip_bucket(backup_components[3]),
)
)
# Print out the list of backups to choose from. In the process,
# update each line in the backup list to be the AWS S3 object name
# and its (human-friendly) size.
print("Backup List:")
for i, backup_entry in enumerate(aws_backup_list):
print(f"{i+1}: {backup_entry[1]} ({backup_entry[0]})")
backup_num = int(
input("Enter the number of the backup you would like to restore (0 = None):")
)
if backup_num == 0:
print("No backup selected. Exiting.")
sys.exit(0)
backup = aws_backup_list[backup_num - 1][1]
step.print(f"Fetch the selected backup, {backup}.")
aws.pull(backup, Path(restore_dir) / restore_file)
step.print("Unpack the backup.")
os.chdir(restore_dir)
with tarfile.open(restore_file, "r:gz") as tar:
tar.extractall()
step.print("Restore the database.")
db_pod = combine.get_pod_id(CombineApp.Component.Database)
if not db_pod:
print("Cannot find the database container.", file=sys.stderr)
sys.exit(1)
combine.kubectl(
[
"cp",
os.environ["db_files_subdir"],
f"{db_pod}:/",
]
)
combine.exec(
db_pod,
[
"mongorestore",
"--drop",
"--gzip",
"--quiet",
],
)
combine.exec(
db_pod,
[
"rm",
"-rf",
os.environ["db_files_subdir"],
],
)
step.print("Copy the backend files.")
backend_pod = combine.get_pod_id(CombineApp.Component.Backend)
if not backend_pod:
print("Cannot find the backend container.", file=sys.stderr)
sys.exit(1)
# if --clean option was used, delete the existing backend files
if args.clean:
# we run the rm command inside a bash shell so that the shell will do wildcard
# expansion
combine.exec(
backend_pod,
[
"/bin/bash",
"-c",
f"rm -rf /home/app/{os.environ['backend_files_subdir']}/*",
],
)
combine.kubectl(
["cp", os.environ["backend_files_subdir"], f"{backend_pod}:/home/app", "--no-preserve"]
)
if __name__ == "__main__":
main()
``` |
{
"source": "jmgraeffe/gtao_python_wrapper",
"score": 3
} |
#### File: jmgraeffe/gtao_python_wrapper/blip.py
```python
import __orange__
from GTAOrange import world as _world
from GTAOrange import vehicle as _vehicle
from GTAOrange import player as _player
__pool = {}
class Blip():
"""Blip class
DO NOT GENERATE NEW OBJECTS DIRECTLY! Please use the create() function instead.
Attributes:
id (id): blip id
is_global (bool): boolean which says if this blip is displayed to all players or not
"""
id = None
is_global = None
_ehandlers = {}
def __init__(self, id, is_global):
"""Initializes a new Blip object.
Args:
id (TYPE): Description
is_global (TYPE): Description
"""
self.id = id
self.is_global = is_global
def attachTo(self, dest):
"""Attaches the blip to the vehicle represented by the given vehicle object, or to the player represented by the given player object.
Args:
dest (GTAOrange.player.Player OR GTAOrange.vehicle.Vehicle): player or vehicle object
Returns:
bool: True for success, False for failure
"""
if isinstance(dest, _player.Player):
__orange__.AttachBlipToPlayer(self.id, dest.id)
return True
elif isinstance(dest, _vehicle.Vehicle):
__orange__.AttachBlipToVehicle(self.id, dest.id)
return True
else:
return False
def delete(self):
"""Deletes the blip.
"""
deleteByID(self.id)
def distanceTo(self, x, y, z=None):
"""Returns the distance from the blip to the given coordinates.
Args:
x (float): x-coord
y (float): y-coord
z (float, optional): z-coord
Returns:
float: distance between blip and given coordinates
"""
if z is not None:
x1, y1, z1 = self.getPosition()
return _world.getDistance(x1, y1, z1, x, y, z)
else:
x1, y1 = self.getPosition()
return _world.getDistance(x1, y1, x, y)
def getID(self):
"""Returns blip id.
Returns:
int: blip id
"""
return self.id
def getPosition(self):
"""Returns current position.
Returns:
tuple: position tuple with 3 values
"""
return __orange__.GetBlipCoords(self.id)
def setColor(self, color):
"""Sets color of the blip.
Args:
color (GTAOrange.blip.Color): blip color
"""
__orange__.SetBlipColor(self.id, color)
def setRoute(self, route):
"""Enables/disables routing to blip.
Args:
route (bool): True for routing, False for not
"""
__orange__.SetBlipRoute(self.id, route)
def setScale(self, scale):
"""Sets scale of blip.
Args:
scale (float): blip scale
"""
__orange__.SetBlipScale(self.id, scale)
def setSprite(self, sprite):
"""Sets sprite (texture, icon) of blip.
Args:
sprite (GTAOrange.blip.Sprite): blip sprite
"""
__orange__.SetBlipSprite(self.id, sprite)
def setShortRange(self, toggle):
"""Sets that blip can be seen only on the short distance.
Args:
toggle (bool): True for yes, False for no
"""
__orange__.SetBlipShortRange(self.id, toggle)
def create(name, x, y, z, scale=1.0, color=None, sprite=None):
"""Creates a new blip.
This is the right way to spawn a new blip.
Args:
name (string): name (displayed in the map legend)
x (float): x-coord of blip
y (float): y-coord of blip
z (float): z-coord of blip
scale (float, optional): blip scale
color (GTAOrange.blip.Color, optional): blip color
sprite (GTAOrange.blip.Sprite, optional): blip sprite (texture, icon)
Returns:
GTAOrange.blip.Blip: blip object
"""
global __pool
blip = Blip(__orange__.CreateBlipForAll(name, x, y, z, scale,
color if color is not None else Color.ORANGE, sprite if sprite is not None else Sprite.STANDARD), True)
__pool[blip.id] = blip
return blip
def deleteByID(id):
"""Deletes a blip object by the given id.
Args:
id (int): blip id
Returns:
bool: True on success, False on failure
Raises:
TypeError: raises if blip id is not int
"""
global __pool
if isinstance(id, int):
if id in __pool.keys():
del __pool[id]
return __orange__.DeleteBlip(id)
else:
return False
else:
raise TypeError('Blip ID must be an integer')
def getByID(id):
"""Returns blip object by given id.
Args:
id (int): blip id
Returns:
        GTAOrange.blip.Blip: blip object if found, False otherwise
Raises:
TypeError: raises if blip id is not int
"""
global __pool
if isinstance(id, int):
if id in __pool.keys():
return __pool[id]
return False
else:
raise TypeError('Blip ID must be an integer')
def getAll():
"""Returns dictionary with all blip objects.
WARNING! Can cause heavy load on some servers. If you can avoid using it, don't use it!
Returns:
dict: blip dictionary
"""
return __pool
class Color():
"""Enum-like class with attributes representing all colors which can be used for blips
"""
WHITE = 0
RED = 1
GREEN = 2
BLUE = 3
ORANGE = 17
PURPLE = 19
GREY = 20
BROWN = 21
PINK = 23
DARKGREEN = 25
DARKPURPLE = 27
DARKBLUE = 29
MICHAELBLUE = 42
FRANKLINGREEN = 43
TREVORORANGE = 44
YELLOW = 66
class Sprite():
"""Enum-like class with attributes representing all sprites which can be used for blips
"""
STANDARD = 1
BIGBLIP = 2
POLICEOFFICER = 3
POLICEAREA = 4
SQUARE = 5
PLAYER = 6
NORTH = 7
WAYPOINT = 8
BIGCIRCLE = 9
BIGCIRCLEOUTLINE = 10
ARROWUPOUTLINED = 11
ARROWDOWNOUTLINED = 12
ARROWUP = 13
ARROWDOWN = 14
POLICEHELICOPTERANIMATED = 15
JET = 16
NUMBER1 = 17
NUMBER2 = 18
NUMBER3 = 19
NUMBER4 = 20
NUMBER5 = 21
NUMBER6 = 22
NUMBER7 = 23
NUMBER8 = 24
NUMBER9 = 25
NUMBER10 = 26
GTAOCREW = 27
GTAOFRIENDLY = 28
LIFT = 36
RACEFINISH = 38
SAFEHOUSE = 40
POLICEOFFICER2 = 41
POLICECARDOT = 42
POLICEHELICOPTER = 43
CHATBUBBLE = 47
GARAGE2 = 50
DRUGS = 51
STORE = 52
POLICECAR = 56
POLICEPLAYER = 58
POLICESTATION = 60
HOSPITAL = 61
HELICOPTER = 64
STRANGERSANDFREAKS = 65
ARMOREDTRUCK = 66
TOWTRUCK = 68
BARBER = 71
LOSSANTOSCUSTOMS = 72
CLOTHES = 73
TATTOOPARLOR = 75
SIMEON = 76
LESTER = 77
MICHAEL = 78
TREVOR = 79
RAMPAGE = 84
VINEWOODTOURS = 85
LAMAR = 86
FRANKLIN = 88
CHINESE = 89
AIRPORT = 90
BAR = 93
BASEJUMP = 94
CARWASH = 100
COMEDYCLUB = 102
DART = 103
FIB = 106
DOLLARSIGN = 108
GOLF = 109
AMMUNATION = 110
EXILE = 112
SHOOTINGRANGE = 119
SOLOMON = 120
STRIPCLUB = 121
TENNIS = 122
TRIATHLON = 126
OFFROADRACEFINISH = 127
KEY = 134
MOVIETHEATER = 135
MUSIC = 136
MARIJUANA = 140
HUNTING = 141
ARMSTRAFFICKINGGROUND = 147
NIGEL = 149
ASSAULTRIFLE = 150
BAT = 151
GRENADE = 152
HEALTH = 153
KNIFE = 154
MOLOTOV = 155
PISTOL = 156
RPG = 157
SHOTGUN = 158
SMG = 159
SNIPER = 160
SONICWAVE = 161
POINTOFINTEREST = 162
GTAOPASSIVE = 163
GTAOUSINGMENU = 164
LINK = 171
MINIGUN = 173
GRENADELAUNCHER = 174
ARMOR = 175
CASTLE = 176
CAMERA = 184
HANDCUFFS = 188
YOGA = 197
CAB = 198
NUMBER11 = 199
NUMBER12 = 200
NUMBER13 = 201
NUMBER14 = 202
NUMBER15 = 203
NUMBER16 = 204
SHRINK = 205
EPSILON = 206
PERSONALVEHICLECAR = 225
PERSONALVEHICLEBIKE = 226
CUSTODY = 237
ARMSTRAFFICKINGAIR = 251
FAIRGROUND = 266
PROPERTYMANAGEMENT = 267
ALTRUIST = 269
ENEMY = 270
CHOP = 273
DEAD = 274
HOOKER = 279
FRIEND = 280
BOUNTYHIT = 303
GTAOMISSION = 304
GTAOSURVIVAL = 305
CRATEDROP = 306
PLANEDROP = 307
SUB = 308
RACE = 309
DEATHMATCH = 310
ARMWRESTLING = 311
AMMUNATIONSHOOTINGRANGE = 313
RACEAIR = 314
RACECAR = 315
RACESEA = 316
GARBAGETRUCK = 318
SAFEHOUSEFORSALE = 350
PACKAGE = 351
MARTINMADRAZO = 352
ENEMYHELICOPTER = 353
BOOST = 354
DEVIN = 355
MARINA = 356
GARAGE = 357
GOLFFLAG = 358
HANGAR = 359
HELIPAD = 360
JERRYCAN = 361
MASKS = 362
HEISTSETUP = 363
INCAPACITATED = 364
PICKUPSPAWN = 365
BOILERSUIT = 366
COMPLETED = 367
ROCKETS = 368
GARAGEFORSALE = 369
HELIPADFORSALE = 370
MARINAFORSALE = 371
HANGARFORSALE = 372
BUSINESS = 374
BUSINESSFORSALE = 375
RACEBIKE = 376
PARACHUTE = 377
TEAMDEATHMATCH = 378
RACEFOOT = 379
VEHICLEDEATHMATCH = 380
BARRY = 381
DOM = 382
MARYANN = 383
CLETUS = 384
JOSH = 385
MINUTE = 386
OMEGA = 387
TONYA = 388
PAPARAZZO = 389
CROSSHAIR = 390
CREATOR = 398
CREATORDIRECTION = 399
ABIGAIL = 400
BLIMP = 401
REPAIR = 402
TESTOSTERONE = 403
DINGHY = 404
FANATIC = 405
INFORMATION = 407
CAPTUREBRIEFCASE = 408
LASTTEAMSTANDING = 409
BOAT = 410
CAPTUREHOUSE = 411
JERRYCAN2 = 415
RP = 416
GTAOPLAYERSAFEHOUSE = 417
GTAOPLAYERSAFEHOUSEDEAD = 418
CAPTUREAMERICANFLAG = 419
CAPTUREFLAG = 420
TANK = 421
HELICOPTERANIMATED = 422
PLANE = 423
PLAYERNOCOLOR = 425
GUNCAR = 426
SPEEDBOAT = 427
HEIST = 428
STOPWATCH = 430
DOLLARSIGNCIRCLED = 431
CROSSHAIR2 = 432
DOLLARSIGNSQUARED = 434
```
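The module keeps every created blip in a private pool and exposes `create()`, `getByID()`, and `deleteByID()` as the supported entry points. A hypothetical server-side usage sketch (assumes the GTA:Orange runtime that provides `__orange__` is available):
```python
from GTAOrange import blip as Blip

garage = Blip.create("Garage", -43.0, -1098.0, 26.0, scale=1.2,
                     color=Blip.Color.BLUE, sprite=Blip.Sprite.GARAGE)
garage.setRoute(True)                                # draw a GPS route to the blip
print(garage.distanceTo(0.0, 0.0, 26.0))             # 3D distance to an arbitrary point
assert Blip.getByID(garage.getID()) is garage        # pool lookup returns the same object
garage.delete()                                      # removes it from the pool and the world
```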
#### File: jmgraeffe/gtao_python_wrapper/debug.py
```python
def dump(obj, magic=False):
"""Dumps every attribute of an object to the console.
Args:
obj (any object): object you want to dump
magic (bool, optional): True if you want to output "magic" attributes (like __init__, ...)
"""
for attr in dir(obj):
if magic is True:
print("obj.%s = %s" % (attr, getattr(obj, attr)))
else:
if not attr.startswith('__'):
print("obj.%s = %s" % (attr, getattr(obj, attr)))
```
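A quick illustration of `dump()`; the class and values below are made up, and the function is assumed to be imported from this module:
```python
class _Config:
    host = "localhost"
    port = 7788

dump(_Config())              # prints obj.host = localhost and obj.port = 7788
dump(_Config(), magic=True)  # additionally lists __init__, __class__, ...
```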
#### File: jmgraeffe/gtao_python_wrapper/thread.py
```python
import threading
__pool = {}
_current = 0
class Thread(threading.Thread):
    id = None
    _function = None
    def __init__(self, f):
        # A module-level "__current" would be name-mangled to "_Thread__current"
        # inside this class body, so a single-underscore name is used for the counter.
        global _current
        threading.Thread.__init__(self)
        self.id = _current
        self._function = f
        _current += 1
```
#### File: jmgraeffe/gtao_python_wrapper/world.py
```python
import math
def getDistance(x1, y1, z1, x2, y2=None, z2=None):
"""Returns the distance between two points, either 3-dimensional ones or 2-dimensional ones.
Please use the components of them in a row as parameters.
For example, if you've 2d points:
A(10|20), B(30|40)
getDistance(10, 20, 30, 40)
And if you've 3d points:
C(50|60|70), D(80|90|100)
getDistance(50, 60, 70, 80, 90, 100)
Args:
x1 (float): x-coord of first point
y1 (float): y-coord of first point
z1 (float): z-coord of first point
x2 (float): x-coord of second point
y2 (float, optional): y-coord of second point
z2 (float, optional): z-coord of second point
Returns:
float: distance between given points
"""
    if y2 is None:
        # 2D case: (x1, y1) and (z1, x2) hold the two points' coordinates.
        return math.sqrt((z1 - x1) ** 2 + (x2 - y1) ** 2)
    else:
        return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2 + (z2 - z1) ** 2)
```
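With the exponent operator corrected above, a quick sanity check (illustrative import path) gives the expected 3-4-5 and 1-2-2 distances:
```python
from GTAOrange import world

assert world.getDistance(0.0, 0.0, 3.0, 4.0) == 5.0            # 2D: A(0|0), B(3|4)
assert world.getDistance(0.0, 0.0, 0.0, 1.0, 2.0, 2.0) == 3.0  # 3D: C(0|0|0), D(1|2|2)
```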
#### File: gtao_python_wrapper/wrappertest/__init__.py
```python
from threading import Thread
import __orange__ as API
import GTAOrange.player as Player
import GTAOrange.vehicle as Vehicle
import GTAOrange.blip as Blip
import GTAOrange.text as Text
import GTAOrange.marker as Marker
import GTAOrange.object as Object
def _sendPlayerList(target):
players = Player.getAll()
target.chatMsg("Players:")
for key, player in players.items():
target.chatMsg(player.getName())
def _threadTest():
print("Sleeping...")
i = 0
while True:
print(i)
i += 1
print("Woke up!")
def onEventStart(bla, bli):
print(bla)
print(bli)
return True
def onPlayerConnect(player, ip):
print('Player:connect | ' + str(player.getName()) + ' | ' + ip)
player.setPosition(100.0, -1940.0, 21.0)
# own attributes
player.testveh = None
player.testblip = None
# trying player-local events
player.on("leftvehicle", onPlayerLeftVehicle)
return True
def onPlayerDisconnect(player, reason):
print('Player:disconnect | ' + str(player) + ' | ' + str(reason))
def onPlayerCommand(player, command):
# print('Player:command | ' + str(player.getID()) + ' | ' + command)
command = command.split()
# player commands
if(command[0] == "/setpos"):
player.setPosition(float(command[1]), float(
command[2]), float(command[3]))
elif(command[0] == "/players"):
_sendPlayerList(player)
elif(command[0] == "/getpos"):
x, y, z = player.getPosition()
player.chatMsg("{:.9f}".format(x) + "|" +
"{:.9f}".format(y) + "|" + "{:.9f}".format(z))
cords = player.getPosition()
print(cords)
elif(command[0] == "/sethead"):
player.setHeading(float(command[1]))
elif(command[0] == "/gethead"):
player.chatMsg(str(player.chatMsg(player.getHeading())))
elif(command[0] == "/removeweapons"):
player.removeWeapons()
elif(command[0] == "/giveweapon"):
player.giveWeapon(int(command[1]), int(command[2]))
elif(command[0] == "/giveammo"):
pass
elif(command[0] == "/givemoney"):
player.giveMoney(int(command[1]))
elif(command[0] == "/setmoney"):
player.setMoney(int(command[1]))
elif(command[0] == "/resetmoney"):
player.resetMoney()
elif(command[0] == "/getmoney"):
player.chatMsg(str(player.getMoney()))
elif(command[0] == "/setmodel"):
player.setModel(int(command[1]))
elif(command[0] == "/getmodel"):
player.chatMsg(str(player.getModel()))
elif(command[0] == "/setname"):
player.setName(command[1])
elif(command[0] == "/getname"):
player.chatMsg(player.getName())
elif(command[0] == "/sethealth"):
player.setHealth(float(command[1]))
elif(command[0] == "/gethealth"):
player.chatMsg(str(player.getHealth()))
elif(command[0] == "/setarmour"):
player.setArmour(float(command[1]))
elif(command[0] == "/getarmour"):
player.chatMsg(str(player.getArmour()))
elif(command[0] == "/playerblip"):
player.attachBlip(player.getName() + "'s super special blip", 2)
elif(command[0] == "/nullblip"):
API.CreateBlipForAll("0|0|0", 0.0, 0.0, 70.0, 1.0, 17, 11)
elif(command[0] == "/setcolor"):
val = API.SetPlayerColor(player, int(command[1]))
print(val)
elif(command[0] == "/getcolor"):
val = API.GetPlayerColor(player)
print(val)
elif(command[0] == "/broadcast"):
player.broadcast(command[1], int(command[2]))
elif(command[0] == "/sendmessage"):
player.chatMsg(command[1])
elif(command[0] == "/disablehud"):
        if command[1] == "1":
player.disableHUD()
else:
player.enableHUD()
# thread tests
elif command[0] == "/thread":
t = Thread(target=_threadTest)
t.daemon = True
t.start()
# vehicle commands
elif command[0] == "/veh":
if command[1] == "create":
if player.testveh is None:
x, y, z = player.getPosition()
player.testveh = Vehicle.create(
"Burrito", x, y, z, player.getHeading())
player.chatMsg("Created a Burrito! :-) | ID: " +
str(player.testveh.id))
else:
player.chatMsg("Please delete your car before!")
elif command[1] == "delete":
if player.testveh is not None:
player.testveh.delete()
player.testveh = None
else:
player.chatMsg("Please create a car before!")
elif command[1] == "getpos":
if player.testveh is not None:
#x, y, z = player.testveh.getPosition()
#player.chatMsg("{:.9f}".format(x) + "|" + "{:.9f}".format(y) + "|" + "{:.9f}".format(z))
val = player.testveh.getPosition()
print(val)
else:
player.chatMsg("Please create a car before!")
elif command[1] == "setpos":
pass
# blip commands
elif command[0] == "/blip":
if command[1] == "create":
if player.testveh is None:
x, y, z = player.getPosition()
player.testblip = Blip.create("TADAAAA", x, y, 90)
player.chatMsg("Created a fancy blip! :-) | ID: " +
str(player.testblip.id))
else:
player.chatMsg("Please delete your car before!")
elif command[1] == "delete":
if player.testblip is not None:
player.testblip.delete()
player.testblip = None
else:
player.chatMsg("Please create a blip before!")
elif command[1] == "getpos":
if player.testblip is not None:
#x, y, z = player.testveh.getPosition()
#player.chatMsg("{:.9f}".format(x) + "|" + "{:.9f}".format(y) + "|" + "{:.9f}".format(z))
val = player.testblip.getPosition()
print(val)
else:
player.chatMsg("Please create a car before!")
elif command[1] == "setpos":
pass
# 3dtext commands
elif command[0] == "/3dtext":
if command[1] == "create":
x, y, z = player.getPosition()
text = Text.create("Test", x, y, z)
player.chatMsg(
"Created a fancy 3d text! :-) | ID: " + str(text.id))
elif command[1] == "delete":
text = Text.getByID(int(command[2]))
if text is not False:
text.delete()
elif command[1] == "getpos":
text = Text.getByID(int(command[2]))
if text is not False:
val = text.getPosition()
print(val)
elif command[1] == "setpos":
pass
# marker commands
elif command[0] == "/marker":
if command[1] == "create":
x, y, z = player.getPosition()
marker = Marker.create(x, y, z)
marker.on("playerentered", onPlayerEnteredMarker)
player.chatMsg("Created a fancy marker! :-) | ID: " +
str(marker.id))
elif command[1] == "delete":
marker = Marker.getByID(int(command[2]))
if marker is not False:
marker.delete()
elif command[1] == "getpos":
marker = Marker.getByID(int(command[2]))
if marker is not False:
val = marker.getPosition()
print(val)
elif command[1] == "setpos":
pass
# object commands
elif command[0] == "/object":
if command[1] == "create":
x, y, z = player.getPosition()
obj = Object.create(1204839864, x, y, z, 1.0, 1.0, 1.0)
player.chatMsg("Created a fancy object! :-) | ID: " + str(obj.id))
elif command[1] == "delete":
obj = Object.getByID(int(command[2]))
if obj is not False:
obj.delete()
else:
print(' '.join(command))
return True
def onPlayerEnteredVehicle(player, veh):
print('Vehicle:playerentered | ' +
str(player.getID()) + ' | ' + str(veh.getID()))
def onPlayerLeftVehicle(player, veh):
print('Vehicle:playerleft | ' + str(player.getID()) + ' | ' + str(veh.getID()))
def onPlayerEnteredMarker(marker, player):
x, y, z = marker.getPosition()
player.setPosition(x, y, z + 5)
Player.on("connect", onPlayerConnect)
Player.on("command", onPlayerCommand)
Vehicle.on("playerentered", onPlayerEnteredVehicle)
#API.TriggerServerEvent("PlayerCommand", [0,"Test"])
``` |
{
"source": "jmgraeffe/ieee802-11-simplified-mac-simulator",
"score": 2
} |
#### File: ieee802-11-simplified-mac-simulator/exporters/iterations_transmissions_plot.py
```python
import matplotlib.pyplot as plt
from simulation import Scheme
def export(simulations, file_path, marker_styles=None):
schemes = []
xticks = []
scheme_xs = {}
scheme_ys = {}
for num_iterations, scheme_simulations in simulations.items():
xticks.append(num_iterations)
for scheme, simulations in scheme_simulations.items():
if scheme not in scheme_xs.keys():
schemes.append(scheme)
scheme_xs[scheme] = []
scheme_ys[scheme] = []
scheme_xs[scheme].append(num_iterations)
scheme_ys[scheme].append(simulations[16].successful_transmissions) # TODO: remove hardcoded 16, bad style :(
for scheme in schemes:
if marker_styles is None:
plt.plot(scheme_xs[scheme], scheme_ys[scheme], 'o-', label='{}'.format(Scheme.to_human_name(scheme)))
else:
plt.plot(scheme_xs[scheme], scheme_ys[scheme], label='{}'.format(Scheme.to_human_name(scheme)), **marker_styles[scheme])
plt.grid()
plt.xlabel('Number of Iterations')
plt.ylabel('Number of Successful Transmissions')
# plt.xticks(xticks)
# plt.xscale('log')
plt.legend(fancybox=True, framealpha=1.0)
plt.savefig(file_path, bbox_inches='tight')
plt.clf()
```
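A possible driver for this exporter, assuming the `simulation` and `exporters` packages are importable as laid out in the repository; note the exporter's hard-coded expectation that 16 stations are part of the sweep:
```python
import simulation
from simulation import Scheme
from exporters import iterations_transmissions_plot

if __name__ == "__main__":
    results = simulation.run_multiple(
        range_iterations=[100, 500, 1000],
        schemes=[Scheme.DCF_BASIC, Scheme.CRB],
        range_stations=[16],
    )
    iterations_transmissions_plot.export(results, "iterations_vs_transmissions.png")
```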
#### File: ieee802-11-simplified-mac-simulator/simulation/classes.py
```python
import copy
class Simulation:
class _Log(list):
def append(self, obj):
super().append(copy.deepcopy(obj))
def __init__(self, scheme, num_stations, num_iterations, cw_start, cw_end):
# not changing
self.scheme = scheme
self.num_stations = num_stations
self.num_iterations = num_iterations
self.cw_start = cw_start
self.cw_end = cw_end
# changing
self.frame_log = Simulation._Log() # history of what happened in a time slot (e.g. data sent by station x, or a collision, or nothing)
self.station_log = Simulation._Log() # history of station states (backoff counter etc.)
self.collisions_stations = 0 # collisions over all stations, e.g. if you've multiple stations there can be multiple collisions per time slot
self.collisions_ap = 0 # collisions as seen from the AP, so even if multiple stations are trying to send, there will be only one collision
self.successful_transmissions = 0 # since collisions do not occur even when no data is transmitted in a time slot at all, this is also an important factor
# you could alternatively calculate that afterwards by counting the None values in frame_log by iterating over it, but for performance we just do it on the go
def add(self, simulation):
self.frame_log = None # this is obviously not available anymore then
self.station_log = None # this is obviously not available anymore then
self.collisions_stations += simulation.collisions_stations
self.collisions_ap += simulation.collisions_ap
self.successful_transmissions += simulation.successful_transmissions
def divide_by(self, num):
self.collisions_stations /= num
self.collisions_ap /= num
self.successful_transmissions /= num
class Medium:
def __init__(self, ap):
self.iteration = 0
self.sending_stations_this_iteration = []
self.ap = ap
def evaluate_iteration(self):
num_sending_stations = len(self.sending_stations_this_iteration)
if num_sending_stations > 0:
if num_sending_stations == 1:
station = self.sending_stations_this_iteration[0]
# tell station and AP that the station succeeded
station.feedback(False)
self.ap.data(station)
return DataFrame(station)
else:
# tell every collided station that it collided as feedback
# (theoretically, this would be done by the station/AP itself, e.g. by sensing the medium)
for station in self.sending_stations_this_iteration:
station.feedback(True)
                # tell the AP that collisions occurred
# (theoretically, the AP won't know which stations caused the collision,
# but it's simpler for programming to just pass it to the AP directly
# and it does not change anything)
self.ap.collision(self.sending_stations_this_iteration)
return CollisionFrame(num_sending_stations)
else:
return None
def next_iteration(self):
self.iteration += 1
self.sending_stations_this_iteration.clear()
def send(self, station):
self.sending_stations_this_iteration.append(station)
class Station:
def __init__(self, num, medium):
self.num = num
self.medium = medium
def feedback(self, collision):
pass
class AccessPoint(Station):
def __init__(self, medium):
super().__init__(-1, medium)
def data(self, station):
pass
def collision(self, stations):
pass
class DataFrame:
def __init__(self, station):
self.station = station
class CollisionFrame:
def __init__(self, collisions):
self.collisions = collisions
```
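These classes wire together as a small slotted-medium model: stations call `send()`, and `evaluate_iteration()` yields a `DataFrame`, a `CollisionFrame`, or `None`. An illustrative round, outside the simulator drivers:
```python
ap = AccessPoint(medium=None)
medium = Medium(ap)
ap.medium = medium

s1, s2 = Station(1, medium), Station(2, medium)
medium.send(s1)
medium.send(s2)
frame = medium.evaluate_iteration()          # two senders -> collision
assert isinstance(frame, CollisionFrame) and frame.collisions == 2

medium.next_iteration()
medium.send(s1)
assert isinstance(medium.evaluate_iteration(), DataFrame)  # lone sender succeeds
```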
#### File: ieee802-11-simplified-mac-simulator/simulation/__init__.py
```python
from enum import Enum
from multiprocessing import cpu_count, Pool
import logging
import collections
import time
class Scheme(Enum):
DCF_BASIC = 1
DCF_NO_BACKOFF_MEMORY = 2
DCF_GLOBAL_CW = 3
    CRB = 4
TBRI = 5 # 3bRI, 3 bit of reservation information, three bit scheduling
@classmethod
def to_human_name(cls, scheme):
if scheme is cls.DCF_BASIC:
return 'DCF'
elif scheme is cls.CRB:
return 'CRB'
elif scheme is cls.TBRI:
return '3bRI'
else:
return str(scheme)
def run(scheme=Scheme.DCF_BASIC, num_stations=50, num_iterations=1000, cw_start=15, cw_end=255):
if scheme is Scheme.DCF_BASIC:
from .schemes.dcf_basic import Simulator
# elif scheme is Scheme.DCF_NO_BACKOFF_MEMORY:
# from .schemes.dcf_nobackoffmemory import Simulator
# elif scheme is Scheme.DCF_GLOBAL_CW:
# from .schemes.dcf_globalcw import Simulator
elif scheme is Scheme.CRB:
from .schemes.crb import Simulator
elif scheme is Scheme.TBRI:
        from .schemes.tbri import Simulator
else:
logging.error('Scheme \'{}\' not implemented!'.format(scheme))
return
simulation = Simulator(num_stations, num_iterations, cw_start, cw_end).run()
logging.info('-' * 64)
logging.info('collisions_ap\t\t\t\t= {}'.format(simulation.collisions_ap))
logging.info('collisions_stations\t\t\t= {}'.format(simulation.collisions_stations))
logging.info('successful_transmissions\t= {}'.format(simulation.successful_transmissions))
logging.info('-' * 64)
return simulation
def run_process(args):
return run(*args)
def run_multiple(range_iterations, schemes, range_stations, cw_start=15, cw_end=255):
simulations = collections.OrderedDict()
process_args = []
for num_iterations in range_iterations:
simulations[num_iterations] = collections.OrderedDict()
for scheme in schemes:
simulations[num_iterations][scheme] = collections.OrderedDict()
for num_stations in range_stations:
process_args.append((scheme, num_stations, num_iterations, cw_start, cw_end))
with Pool(processes=int(3 * cpu_count() / 4)) as pool:
results = pool.map(run_process, process_args)
for result in results:
simulations[result.num_iterations][result.scheme][result.num_stations] = result
return simulations
def run_multiple_averaged(num_simulations, range_iterations, schemes, range_stations, cw_start=15, cw_end=255):
start = time.time()
first = run_multiple(range_iterations, schemes, range_stations, cw_start, cw_end)
end = time.time()
print("1. simulation finished in {} seconds!".format(end - start))
# add statistics of all missing simulations to the first one
for num_simulation in range(num_simulations - 1):
start = time.time()
simulations = run_multiple(range_iterations, schemes, range_stations, cw_start, cw_end)
end = time.time()
print("{}. simulation finished in {} seconds!".format(num_simulation + 2, end - start))
for num_iterations, simulations1 in simulations.items():
for scheme, simulations2 in simulations1.items():
for num_stations, simulation in simulations2.items():
first[num_iterations][scheme][num_stations].add(simulation)
# divide everything to get average
for num_iterations, simulations1 in first.items():
for scheme, simulations2 in simulations1.items():
for num_stations, simulation in simulations2.items():
first[num_iterations][scheme][num_stations].divide_by(num_simulations)
return first
```
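A hypothetical entry point for an averaged sweep (module names follow the repository layout; the `__main__` guard matters because `run_multiple` starts a multiprocessing pool):
```python
import logging
from simulation import Scheme, run_multiple_averaged

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    averaged = run_multiple_averaged(
        num_simulations=5,
        range_iterations=[1000, 5000],
        schemes=[Scheme.DCF_BASIC, Scheme.CRB, Scheme.TBRI],
        range_stations=range(10, 51, 10),
    )
    result = averaged[5000][Scheme.TBRI][50]
    print(result.successful_transmissions, result.collisions_ap)
```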
#### File: schemes/_old/dcf_globalcw.py
```python
import random
from .dcf_basic import Station as DcfStation, Simulator as DcfSimulator, Medium as DcfMedium
class Station(DcfStation):
def tick(self):
"""Gets called by simulation on every time slot to determine what the current backoff is.
"""
if self.backoff is None:
self.backoff = random.randrange(self.medium.cw_size + 1)
self.backoff -= 1
class Medium(DcfMedium):
def __init__(self, cw_start, cw_end):
super().__init__(cw_start, cw_end)
self.cw_size = cw_start
class Simulator(DcfSimulator):
def __init__(self, num_stations, num_iterations, cw_start, cw_end):
super().__init__(num_stations, num_iterations, cw_start, cw_end)
self.medium = Medium(cw_start, cw_end)
self.stations = self.generate_stations(num_stations)
def generate_station(self, num):
return Station(num, self.medium)
def do_contention_phase(self):
collisions, sender = super().do_contention_phase()
if collisions > 0:
self.medium.cw_size = min(((self.medium.cw_size + 1) * 2) - 1, self.medium.cw_end)
else:
self.medium.cw_size = self.medium.cw_start
print('cw_size set to {}'.format(self.medium.cw_size))
return (collisions, sender)
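# Illustrative contention-window evolution with cw_start=15 and cw_end=255:
#   on collision: cw_size 15 -> 31 -> 63 -> 127 -> 255 (capped at cw_end)
#   on success:   cw_size resets to cw_start (15)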
``` |
{
"source": "jmgray24/amazon-timestream-tools",
"score": 3
} |
#### File: sample_apps/python/CsvIngestionExample.py
```python
import csv
import time
from Constant import DATABASE_NAME, TABLE_NAME
class CsvIngestionExample:
def __init__(self, client):
self.client = client
def bulk_write_records(self, filepath):
with open(filepath, 'r') as csv_file:
# creating a csv reader object
csv_reader = csv.reader(csv_file)
records = []
current_time = self._current_milli_time()
counter = 0
# extracting each data row one by one
for row in csv_reader:
dimensions = [
{'Name': row[0], 'Value': row[1]},
{'Name': row[2], 'Value': row[3]},
{'Name': row[4], 'Value': row[5]}
]
record_time = current_time - (counter * 50)
record = {
'Dimensions': dimensions,
'MeasureName': row[6],
'MeasureValue': row[7],
'MeasureValueType': row[8],
'Time': str(record_time)
}
records.append(record)
counter = counter + 1
if len(records) == 100:
self._submit_batch(records, counter)
records = []
if len(records) != 0:
self._submit_batch(records, counter)
print("Ingested %d records" % counter)
def _submit_batch(self, records, counter):
try:
result = self.client.write_records(DatabaseName=DATABASE_NAME, TableName=TABLE_NAME,
Records=records, CommonAttributes={})
print("Processed [%d] records. WriteRecords Status: [%s]" % (counter,
result['ResponseMetadata']['HTTPStatusCode']))
except Exception as err:
print("Error:", err)
@staticmethod
def _current_milli_time():
return int(round(time.time() * 1000))
```
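A hypothetical driver for this ingestion example; it assumes AWS credentials are configured, that `Constant.py` names an existing Timestream database and table, and that `sample.csv` follows the expected column layout (three dimension name/value pairs, then measure name, value, and type):
```python
import boto3
from CsvIngestionExample import CsvIngestionExample

write_client = boto3.client("timestream-write", region_name="us-east-1")  # region is a placeholder
CsvIngestionExample(write_client).bulk_write_records("sample.csv")
```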
#### File: tools/perf-scale-workload/continuous_ingester.py
```python
from collections import namedtuple
import threading
import multiprocessing
import time
from tdigest import TDigest
import timestreamwrite as tswrite
import model
import datetime
from timeit import default_timer as timer
import json
import sys, traceback
import random
import math
import signal
def getTimestampMillis():
## Pick a random timestamp in the 200 minute window to stagger data points generated by
## the different processes.
currentTime = int(round(time.time() * 1000))
return random.randint(currentTime - 100, currentTime + 100)
def getCurrentTimestampMillis():
return int(round(time.time() * 1000))
### State shared by the threads within the same process. Since processes have different global
### state, each process will have its own local copy of this.
seriesId = 0
timestamp = getTimestampMillis()
sigInt = False
lock = threading.Lock()
event = threading.Event()
def signalHandler(sig, frame):
global sigInt
global lock
global event
with lock:
sigInt = True
event.set()
#########################################
######### Ingestion Thread ##############
#########################################
class IngestionThread(threading.Thread):
def __init__(self, threadId, args, dimensionMetrics, dimensionEvents, highUtilizationHosts, lowUtilizationHosts, event):
threading.Thread.__init__(self)
self.threadId = threadId
self.args = args
self.dimensionMetrics = dimensionMetrics
self.dimensionEvents = dimensionEvents
self.client = tswrite.createWriteClient(args.endpoint, profile=args.profile)
self.databaseName = args.databaseName
self.tableName = args.tableName
self.numMetrics = len(dimensionMetrics)
self.numEvents = len(dimensionEvents)
self.digest = TDigest() ## Use the t-digest to compute the streaming percentiles
self.count = 0
self.success = 0
self.sum = 0.0
self.variance = float('nan')
self.highUtilizationHosts = highUtilizationHosts
self.lowUtilizationHosts = lowUtilizationHosts
self.sigInt = False
self.event = event
def run(self):
global seriesId
global timestamp
global lock
idx = 0
mean = 0.0
squared = 0.0
while True:
with lock:
if self.sigInt == True or sigInt == True or self.event.is_set():
print("Thread {} exiting.".format(self.threadId))
break
seriesId += 1
if seriesId >= self.numMetrics + self.numEvents:
## Wrapping around, so move to new timestamp.
seriesId = 0
newTimestamp = timestamp + self.args.intervalMillis
currentTime = getCurrentTimestampMillis()
## Check if the timestamps are falling behind
if newTimestamp < currentTime - 0.05 * self.args.intervalMillis:
print("Can't keep up ingestion to the desired inter-event interval. Expected interval: {} ms. Actual: {} ms. Consider increasing concurrency or processes.".format(self.args.intervalMillis, currentTime - timestamp))
## Move time forward.
timestamp = getTimestampMillis()
else:
timestamp = newTimestamp
## Check if we are ingesting too fast, then slow down.
if timestamp > currentTime - 1000:
## Slow down
sleepTimeSecs = int((timestamp - currentTime)/1000)
print("Thread {} sleeping for {} secs".format(self.threadId, sleepTimeSecs))
time.sleep(sleepTimeSecs)
now = datetime.datetime.now()
print("Resetting to first series from thread: [{}] at time {}. Timestamp set to: {}.".format(self.threadId, now.strftime("%Y-%m-%d %H:%M:%S"), timestamp))
localSeriesId = seriesId
localTimestamp = timestamp
if localSeriesId < self.numMetrics:
commonAttributes = model.createWriteRecordCommonAttributes(self.dimensionMetrics[localSeriesId])
records = model.createRandomMetrics(seriesId, localTimestamp, "MILLISECONDS", self.highUtilizationHosts, self.lowUtilizationHosts)
else:
commonAttributes = model.createWriteRecordCommonAttributes(self.dimensionEvents[localSeriesId - self.numMetrics])
records = model.createRandomEvent(localTimestamp, "MILLISECONDS")
idx += 1
start = timer()
try:
writeResult = tswrite.writeRecords(self.client, self.databaseName, self.tableName, commonAttributes, records)
self.success += 1
except Exception as e:
print(e)
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout)
requestId = "RequestId: {}".format(e.response['ResponseMetadata']['RequestId'])
print(requestId)
print(json.dumps(commonAttributes, indent=2))
print(json.dumps(records, indent=2))
continue
finally:
self.count += 1
end = timer()
cur = end - start
self.digest.update(cur)
self.sum += cur
## Computing the streaming M^2 (squared distance from mean)
delta = cur - mean
mean += delta / self.count
squared += delta * (cur - mean)
if self.count > 1:
self.variance = float(squared / (self.count - 1))
requestId = writeResult['ResponseMetadata']['RequestId']
if idx % 1000 == 0:
now = datetime.datetime.now()
print("{}. {}. {}. Last RequestId: {}. Avg={:,}, Stddev={:,}, 50thPerc={:,}, 90thPerc={:,}, 99thPerc={:,}".format(
self.threadId, idx, now.strftime("%Y-%m-%d %H:%M:%S"), requestId, round(self.sum / self.count, 3),
round(math.sqrt(self.variance), 3), round(self.digest.percentile(50), 3),
round(self.digest.percentile(90), 3), round(self.digest.percentile(99), 3)))
def interrupt(self):
print("Interrupting thread: ", self.threadId)
self.sigInt = True
IngestionSummaryStats = namedtuple('IngestionSummaryStats', 'digest count success sum variance')
#########################################
## Process that spawns multiple ##
## ingestion threads. ##
#########################################
class MultiProcessIngestWorker(multiprocessing.Process):
def __init__(self, processId, args, dimensionMetrics, dimensionEvents, highUtilizationHosts, lowUtilizationHosts, conn, event):
super(MultiProcessIngestWorker, self).__init__()
self.processId = processId
self.args = args
self.conn = conn
self.event = event
self.dimensionMetrics = dimensionMetrics
self.dimensionEvents = dimensionEvents
self.highUtilizationHosts = highUtilizationHosts
self.lowUtilizationHosts = lowUtilizationHosts
self.threads = list()
def run(self):
global lock
global seriesId
global timestamp
with lock:
## Randomly pick a series ID to start for this process.
seriesId = random.randint(0, len(self.dimensionEvents) + len(self.dimensionMetrics) - 1)
timestamp = getTimestampMillis()
print("Process {} using start series ID: {}".format(self.processId, seriesId))
## Register sigint handler
signal.signal(signal.SIGINT, signalHandler)
overallSummary = None
ingestionStart = timer()
try:
for threadId in range(self.args.concurrency):
threadIdStr = "{}-{}".format(self.processId, threadId + 1)
print("Starting ThreadId: {}".format(threadIdStr))
thread = IngestionThread(threadIdStr, self.args, self.dimensionMetrics, self.dimensionEvents,
self.highUtilizationHosts, self.lowUtilizationHosts, self.event)
thread.start()
self.threads.append(thread)
success = 0
count = 0
totalLatency = 0.0
aggregatedDigests = TDigest()
pooledVariance = 0.0
for t in self.threads:
t.join()
success += t.success
## Pool the variance.
if count == 0:
pooledVariance = t.variance
else:
pooledVariance = ((count - 1) * pooledVariance + (t.count - 1) * t.variance) / ((count - 1) + (t.count - 1))
count += t.count
aggregatedDigests += t.digest
totalLatency += t.sum
print("[Process: {}] Total={:,}, Success={:,}, Avg={:,}, Stddev={:,}, 50thPerc={:,}, 90thPerc={:,}, 99thPerc={:,}".format(
self.processId, count, success,
round(totalLatency / count, 3),
round(math.sqrt(pooledVariance), 3), round(aggregatedDigests.percentile(50), 3),
round(aggregatedDigests.percentile(90), 3), round(aggregatedDigests.percentile(99), 3)))
overallSummary = IngestionSummaryStats(aggregatedDigests, count, success, totalLatency, pooledVariance)
ingestionEnd = timer()
print("Total time to ingest: {:,} seconds".format(round(ingestionEnd - ingestionStart, 2)))
finally:
self.conn.send(overallSummary)
#########################################
######### Ingest load ###################
#########################################
## List of processes that are being created
processes = list()
def signalHandlerMultiProc(sig, frame):
if len(processes) == 0:
print("No processes to interrupt")
for p, conn, event in processes:
event.set()
def initializeHighAndLowUtilizationHosts(numHosts):
hostIds = list(range(numHosts))
utilizationRand = random.Random(12345)
utilizationRand.shuffle(hostIds)
lowUtilizationHosts = frozenset(hostIds[0:int(0.2 * len(hostIds))])
highUtilizationHosts = frozenset(hostIds[-int(0.2 * len(hostIds)):])
return (lowUtilizationHosts, highUtilizationHosts)
def ingestRecordsMultiProc(dimensionsMetrics, dimensionsEvents, args):
## Register sigint handler
signal.signal(signal.SIGINT, signalHandlerMultiProc)
numHosts = len(dimensionsMetrics)
remainder = numHosts % args.processes
startId = 0
ingestionStart = timer()
for processId in range(1, args.processes + 1):
endId = startId + int(numHosts / args.processes) + (1 if remainder > 0 else 0)
if endId > numHosts:
print("Number of processes more than number of hosts, skipping process creation")
break
print("Starting process {} with host ranges: [{}, {}]".format(processId, startId, endId - 1))
## Select a subset of hosts
dimensionsMetricsLocal = dimensionsMetrics[startId:endId]
dimensionsMetricsSet = set()
for dim in dimensionsMetricsLocal:
dimensionsMetricsSet.add((dim.region, dim.cell, dim.silo, dim.availability_zone, dim.microservice_name, dim.instance_name))
dimensionsEventsLocal = list()
## Select the dimension events for the hosts selected above.
for dim in dimensionsEvents:
host = (dim.region, dim.cell, dim.silo, dim.availability_zone, dim.microservice_name, dim.instance_name)
if host in dimensionsMetricsSet:
dimensionsEventsLocal.append(dim)
print("Starting process {} with host ranges: [{}, {}]. Metrics: {}. Events: {}".format(processId, startId, endId - 1,
len(dimensionsMetricsLocal), len(dimensionsEventsLocal)))
lowUtilizationHosts, highUtilizationHosts = initializeHighAndLowUtilizationHosts(len(dimensionsMetricsLocal))
parentConn, childConn = multiprocessing.Pipe()
manager = multiprocessing.Manager()
event = manager.Event()
process = MultiProcessIngestWorker(processId, args, dimensionsMetricsLocal, dimensionsEventsLocal, highUtilizationHosts, lowUtilizationHosts, childConn, event)
process.start()
processes.append((process, parentConn, event))
remainder -= 1
startId = endId
success = 0
count = 0
totalLatency = 0.0
aggregatedDigests = TDigest()
pooledVariance = 0.0
for p, conn, event in processes:
output = conn.recv()
p.join()
if output == None:
continue
success += output.success
## Pool the variance.
if count == 0:
pooledVariance = output.variance
else:
pooledVariance = ((count - 1) * pooledVariance + (output.count - 1) * output.variance) / ((count - 1) + (output.count - 1))
count += output.count
aggregatedDigests += output.digest
totalLatency += output.sum
print("[OVERALL] Total={:,}, Success={:,}, Avg={:,}, Stddev={:,}, 50thPerc={:,}, 90thPerc={:,}, 99thPerc={:,}".format(count, success,
round(totalLatency / count, 3),
round(math.sqrt(pooledVariance), 3), round(aggregatedDigests.percentile(50), 3),
round(aggregatedDigests.percentile(90), 3), round(aggregatedDigests.percentile(99), 3)))
ingestionEnd = timer()
print("Total time to ingest: {:,} seconds".format(round(ingestionEnd - ingestionStart, 2)))
```
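A hedged sketch of driving the multi-process ingester: the namespace mirrors the attributes the code reads (`endpoint`, `profile`, `databaseName`, `tableName`, `concurrency`, `processes`, `intervalMillis`), and the endpoint, database, and table values are placeholders:
```python
from types import SimpleNamespace
import continuous_ingester
import model

if __name__ == "__main__":
    args = SimpleNamespace(endpoint="us-east-1", profile=None,
                           databaseName="devops", tableName="host_metrics",
                           concurrency=2, processes=2, intervalMillis=60000)
    dims_metrics, dims_events = model.generateDimensions(scaleFactor=1)
    continuous_ingester.ingestRecordsMultiProc(dims_metrics, dims_events, args)
```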
#### File: tools/perf-scale-workload/model.py
```python
from collections import defaultdict, namedtuple
import random, string
import os
import numpy as np
######################################################################################
## Data model for an example DevOps application tracking resource utilization stats ##
## for hosts deployed in a service. The service is deployed across multiple regions ##
## where each region has multiple instances of the application across different ##
## silos and cells. The service also has multiple micro-services. ##
######################################################################################
######################################################################################
## A visual representation of the schema corresponding to the application.
##
## region
## / ... \
## / ... \
## cell1 cell2
## /...\ /...\
## / ... \ / ... \
## silo1 silo2
## / ... \
## / ... \
## availability_zone, microservice_name, instance_type, os_version, instance_name
######################################################################################
#######################################
###### Dimension model for schema #####
#######################################
regionIad = "us-east-1"
regionCmh = "us-east-2"
regionSfo = "us-west-1"
regionPdx = "us-west-2"
regionDub = "eu-west-1"
regionNrt = "ap-northeast-1"
regions = [regionIad, regionCmh, regionSfo, regionPdx, regionDub, regionNrt]
cellsPerRegion = {
regionIad : 15, regionCmh : 2, regionSfo: 6, regionPdx : 2, regionDub : 10, regionNrt: 5
}
siloPerCell = {
regionIad : 3, regionCmh: 2, regionSfo: 2, regionPdx: 2, regionDub : 2, regionNrt: 3
}
microserviceApollo = "apollo"
microserviceAthena = "athena"
microserviceDemeter = "demeter"
microserviceHercules = "hercules"
microserviceZeus = "zeus"
microservices = [microserviceApollo, microserviceAthena, microserviceDemeter, microserviceHercules, microserviceZeus]
instance_r5_4xl = "r5.4xlarge"
instance_m5_8xl = "m5.8xlarge"
instance_c5_16xl = "c5.16xlarge"
instance_m5_4xl = "m5.4xlarge"
instanceTypes = {
microserviceApollo: instance_r5_4xl,
microserviceAthena: instance_m5_8xl,
microserviceDemeter: instance_c5_16xl,
microserviceHercules: instance_r5_4xl,
microserviceZeus: instance_m5_4xl
}
osAl2 = "AL2"
osAl2012 = "AL2012"
osVersions = {
microserviceApollo: osAl2,
microserviceAthena: osAl2012,
microserviceDemeter: osAl2012,
microserviceHercules: osAl2012,
microserviceZeus: osAl2
}
instancesForMicroservice = {
microserviceApollo: 3,
microserviceAthena: 1,
microserviceDemeter: 1,
microserviceHercules: 2,
microserviceZeus: 3
}
processHostmanager = "host_manager"
processServer = "server"
processNames = {
microserviceApollo: [processServer],
microserviceAthena: [processServer, processHostmanager],
microserviceDemeter: [processServer, processHostmanager],
microserviceHercules: [processServer],
microserviceZeus: [processServer]
}
jdk8 = "JDK_8"
jdk11 = "JDK_11"
jdkVersions = {
microserviceApollo: jdk11,
microserviceAthena: jdk8,
microserviceDemeter: jdk8,
microserviceHercules: jdk8,
microserviceZeus: jdk11
}
######################################################################################
## The metrics and event names reported by the application. These metric names are
## mapped to measure_name in Timestream.
######################################################################################
measureCpuUser = 'cpu_user'
measureCpuSystem = 'cpu_system'
measureCpuIdle = 'cpu_idle'
measureCpuIowait = 'cpu_iowait'
measureCpuSteal = 'cpu_steal'
measureCpuNice = 'cpu_nice'
measureCpuSi = 'cpu_si'
measureCpuHi = 'cpu_hi'
measureMemoryFree = 'memory_free'
measureMemoryUsed = 'memory_used'
measureMemoryCached = 'memory_cached'
measureDiskIoReads = 'disk_io_reads'
meausreDiskIoWrites = 'disk_io_writes'
measureLatencyPerRead = 'latency_per_read'
measureLatencyPerWrite = 'latency_per_write'
measureNetworkBytesIn = 'network_bytes_in'
measureNetworkBytesOut = 'network_bytes_out'
measureDiskUsed = 'disk_used'
measureDiskFree = 'disk_free'
measureFileDescriptors = 'file_descriptors_in_use'
measureTaskCompleted = 'task_completed'
measureTaskEndState = 'task_end_state'
measureGcReclaimed = 'gc_reclaimed'
measureGcPause = 'gc_pause'
measuresForMetrics = [measureCpuUser, measureCpuSystem, measureCpuIdle, measureCpuIowait,
measureCpuSteal, measureCpuNice, measureCpuSi, measureCpuHi,
measureMemoryFree, measureMemoryUsed, measureMemoryCached, measureDiskIoReads,
meausreDiskIoWrites, measureLatencyPerRead, measureLatencyPerWrite, measureNetworkBytesIn,
measureNetworkBytesOut, measureDiskUsed, measureDiskFree, measureFileDescriptors]
measuresForEvents = [measureTaskCompleted, measureTaskEndState, measureGcReclaimed, measureGcPause, measureMemoryFree]
measureValuesForTaskEndState = ['SUCCESS_WITH_NO_RESULT', 'SUCCESS_WITH_RESULT', 'INTERNAL_ERROR', 'USER_ERROR', 'UNKNOWN', 'THROTTLED']
selectionProbabilities = [0.2, 0.7, 0.01, 0.07, 0.01, 0.01]
DimensionsMetric = namedtuple('DimensionsMetric', 'region cell silo availability_zone microservice_name instance_type os_version instance_name')
DimensionsEvent = namedtuple('DimensionsEvent', 'region cell silo availability_zone microservice_name instance_name process_name, jdk_version')
## Generate an alphanumeric string which is used as part of instance names.
def generateRandomAlphaNumericString(length = 5, seed=12345):
## Use a fixed seed to generate the same string across invocations.
rand = random.Random(seed)
x = ''.join(rand.choices(string.ascii_letters + string.digits, k=length))
print("Instance name suffix:", x)
return x
## Generate the values of the dimensions based on the hierarchical schema and data distribution
## characteristics defined earlier.
def generateDimensions(scaleFactor, seed = 12345):
instancePrefix = generateRandomAlphaNumericString(8, seed)
dimensionsMetrics = list()
    dimensionsEvents = list()
for region in regions:
cellsForRegion = cellsPerRegion[region]
siloForRegion = siloPerCell[region]
for cell in range(1, cellsForRegion + 1):
for silo in range(1, siloForRegion + 1):
for microservice in microservices:
cellName = "{}-cell-{}".format(region, cell)
siloName = "{}-cell-{}-silo-{}".format(region, cell, silo)
numInstances = scaleFactor * instancesForMicroservice[microservice]
for instance in range(numInstances):
az = "{}-{}".format(region, (instance % 3) + 1)
instanceName = "i-{}-{}-{}-{:08}.amazonaws.com".format(instancePrefix, microservice, siloName, instance)
instanceType = instanceTypes[microservice]
osVersion = osVersions[microservice]
metric = DimensionsMetric(region, cellName, siloName, az, microservice, instanceType, osVersion, instanceName)
dimensionsMetrics.append(metric)
jdkVersion = jdkVersions[microservice]
for process in processNames[microservice]:
event = DimensionsEvent(region, cellName, siloName, az, microservice, instanceName, process, jdkVersion)
                            dimensionsEvents.append(event)
    return (dimensionsMetrics, dimensionsEvents)
def createWriteRecordCommonAttributes(dimensions):
return { "Dimensions": [{ "Name": dimName, "Value": getattr(dimensions, dimName), "DimensionValueType": "VARCHAR"} for dimName in dimensions._fields] }
def createRandomMetrics(hostId, timestamp, timeUnit, highUtilizationHosts, lowUtilizationHosts):
records = list()
## CPU measures
if hostId in highUtilizationHosts:
cpuUser = 85.0 + 10.0 * random.random()
elif hostId in lowUtilizationHosts:
cpuUser = 10.0 * random.random()
else:
cpuUser = 35.0 + 30.0 * random.random()
records.append(createRecord(measureCpuUser, cpuUser, "DOUBLE", timestamp, timeUnit))
otherCpuMeasures = [measureCpuSystem, measureCpuSteal, measureCpuIowait, measureCpuNice, measureCpuHi, measureCpuSi]
totalOtherUsage = 0.0
for measure in otherCpuMeasures:
value = random.random()
totalOtherUsage += value
records.append(createRecord(measure, value, "DOUBLE", timestamp, timeUnit))
cpuIdle = 100 - cpuUser - totalOtherUsage
records.append(createRecord(measureCpuIdle, cpuIdle, "DOUBLE", timestamp, timeUnit))
remainingMeasures = [measureMemoryFree, measureMemoryUsed, measureMemoryCached, measureDiskIoReads,
meausreDiskIoWrites, measureLatencyPerRead, measureLatencyPerWrite, measureNetworkBytesIn,
measureNetworkBytesOut, measureDiskUsed, measureDiskFree, measureFileDescriptors]
for measure in remainingMeasures:
value = 100.0 * random.random()
records.append(createRecord(measure, value, "DOUBLE", timestamp, timeUnit))
return records
def createRandomEvent(timestamp, timeUnit):
records = list()
records.append(createRecord(measureTaskCompleted, random.randint(0, 500), "BIGINT", timestamp, timeUnit))
records.append(createRecord(measureTaskEndState, np.random.choice(measureValuesForTaskEndState, p=selectionProbabilities), "VARCHAR", timestamp, timeUnit))
remainingMeasures = [measureGcReclaimed, measureGcPause, measureMemoryFree]
for measure in remainingMeasures:
value = 100.0 * random.random()
records.append(createRecord(measure, value, "DOUBLE", timestamp, timeUnit))
return records
def createRecord(measureName, measureValue, valueType, timestamp, timeUnit):
return {
"MeasureName": measureName,
"MeasureValue": str(measureValue),
"MeasureValueType": valueType,
"Time": str(timestamp),
"TimeUnit": timeUnit
}
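# Example record produced by createRecord (illustrative values):
#   createRecord("cpu_user", 42.5, "DOUBLE", 1620000000000, "MILLISECONDS")
#   -> {"MeasureName": "cpu_user", "MeasureValue": "42.5", "MeasureValueType": "DOUBLE",
#       "Time": "1620000000000", "TimeUnit": "MILLISECONDS"}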
def printModelSummary(dimensionsMetrics, dimensionsEvents, metricInterval, eventInterval):
print("Dimensions for metrics: {:,}".format(len(dimensionsMetrics)))
print("Dimensions for events: {:,}".format(len(dimensionsEvents)))
numTimeseries = len(dimensionsMetrics) * len(measuresForMetrics) + len(dimensionsEvents) * len(measuresForEvents)
numDataPointsPerSecond = round((1 / metricInterval) * len(dimensionsMetrics) * len(measuresForMetrics) + (1 / eventInterval) * len(dimensionsEvents) * len(measuresForEvents))
numDataPointsPerHour = 3600 * numDataPointsPerSecond
avgMeasureNameLength = np.average([len(x) for x in measuresForMetrics] + [len(x) for x in measuresForEvents])
avgDimensionsSize = np.average([np.sum([len(getattr(dimensionsEvents[0], dimName)) * 2 for dimName in dimensionsEvents[0]._fields]),
np.sum([len(getattr(dimensionsMetrics[0], dimName)) * 2 for dimName in dimensionsMetrics[0]._fields])])
avgRowSize = avgDimensionsSize + avgMeasureNameLength + 16
numMetricsPerSecond = round((1 / metricInterval) * len(dimensionsMetrics) * len(measuresForMetrics))
numEventsPerSecond = round((1 / eventInterval) * len(dimensionsEvents) * len(measuresForEvents))
metricsMeasureBytes = np.average([len(x) for x in measuresForMetrics])
eventsMeasureBytes = np.average([len(x) for x in measuresForEvents])
eventsDimensionBytes = np.sum([len(getattr(dimensionsEvents[0], dimName)) * 2 for dimName in dimensionsEvents[0]._fields])
metricsDimensionBytes = np.sum([len(getattr(dimensionsMetrics[0], dimName)) * 2 for dimName in dimensionsMetrics[0]._fields])
ingestionVolume = round((numMetricsPerSecond * (metricsDimensionBytes + metricsMeasureBytes + 16) + numEventsPerSecond * (eventsDimensionBytes + eventsMeasureBytes + 16)) / (1024.0 * 1024.0), 2)
dataSizePerHour = round(ingestionVolume * 3600 / 1024.0, 2)
dataSizePerDay = round(dataSizePerHour * 24, 2)
dataSizePerYear = round(dataSizePerDay * 365 / 1024.0, 2)
print("avg row size: {} Bytes".format(avgRowSize))
print("Number of timeseries: {:,}. Avg. data points per second: {:,}. Avg. data points per hour: {:,}".format(numTimeseries, numDataPointsPerSecond, numDataPointsPerHour))
print("Avg. Ingestion volume: {:,} MB/s. Data size per hour: {:,} GB. Data size per day: {:,} GB. Data size per year: {:,} TB".format(ingestionVolume, dataSizePerHour, dataSizePerDay, dataSizePerYear))
``` |
{
"source": "jmgrosen/6857project",
"score": 3
} |
#### File: 6857project/topohiding/helperfunctions.py
```python
import random
import itertools
def egcd(a, b):
if a == 0:
return (b, 0, 1)
else:
g, y, x = egcd(b % a, a)
return (g, x - (b // a) * y, y)
def modinv(a, m):
g, x, y = egcd(a, m)
if g != 1:
raise Exception('modular inverse does not exist')
else:
return x % m
def find_generator(q):
while True:
g = random.randrange(3, 2 * q - 1)
if pow(g, 2, 2*q + 1) != 1 and pow(g, q, 2*q + 1) != 1:
return g
class HPKCR(object):
def __init__(self,g,q):
self.g = g # Generator of group
self.p = 2*q+1 # 2*q + 1 is a prime
self.q = q # q is a prime
#Generate a key given randomness
def key_gen(self):
x = random.randint(1,2*self.q)
sk = x
pk = pow(self.g,x,self.p)
return (pk,sk)
def group(self, a, b):
return (a * b) % self.p
    #Encrypt a message with the given public key and randomness.
def enc(self,m,pk):
y = random.randint(1,2*self.q)
s = pow(pk,y,self.p)
c1 = pow(self.g,y,self.p)
c2 = (m*s) % self.p
return (c1, c2)
#Decrypt a message with the given secret key.
def dec(self,c,sk):
c1,c2 = c
s = pow(c1,sk,self.p)
m = c2 * modinv(s,self.p) % self.p
return m
#Randomization function for ElGamal
def rand(self,c,pk):
r = random.randint(1,2*self.q-1)
c1,c2 = c
return c1*(pow(self.g,r,self.p)) % self.p, pow(pk,r,self.p)*c2 % self.p
#Adding Layers
def add_layer(self,c,sk):
c1,c2 = c
return (c1, c2*pow(c1,sk,self.p)%self.p)
#Removing Layers
def del_layer(self,c,sk):
c1,c2 = c
return (c1, c2*(modinv(c1**sk,self.p)) % self.p )
#Homomorphic Multiplication
def hmult(self,c,cc):
c1, c2 = c
cc1, cc2 = cc
return (c1*cc1)%self.p, (c2*cc2)%self.p
#Homomorphic OR
def hom_or(self,c,cc,pk):
r1 = random.randint(1,2*self.q-1)
r2 = random.randint(1,2*self.q-1)
for i in range(r1):
c = self.hmult(c,c)
for i in range(r2):
cc = self.hmult(cc,cc)
return self.rand(self.hmult(c,cc),pk)
def embed_msg(self, m):
assert m < self.p - 1
return m + 1
def unembed_msg(self, m):
return (m - 1) % self.p
def testLayers(obj,c,sk):
a = obj.add_layer(c,sk)
b = obj.del_layer(a,sk)
return b == c
def testEnc(obj,m):
pk,sk = obj.key_gen()
return m == obj.dec(obj.enc(m,pk),sk)
class FakeHPKCR(object):
def __init__(self):
self.ctr = itertools.count()
#Generate a key given randomness
def key_gen(self):
keynum = next(self.ctr)
return (f"PK({keynum})", f"SK({keynum})")
def group(self, a, b):
return f"Group({a}, {b})"
    #Encrypt a message with the given public key and randomness.
def enc(self,m,pk):
return f"Enc({m}, {pk})"
#Decrypt a message with the given secret key.
def dec(self,c,sk):
return f"Dec({c}, {sk})"
#Randomization function for ElGamal
def rand(self,c,pk):
return f"Rand({c}, {pk})"
#Adding Layers
def add_layer(self,c,sk):
return f"AddLayer({c}, {sk})"
#Removing Layers
def del_layer(self,c,sk):
return f"DelLayer({c}, {sk})"
#Homomorphic Multiplication
def hmult(self,c,cc):
return f"HMult({c}, {cc})"
#Homomorphic OR
def hom_or(self,c,cc,pk):
return f"HomOR({c}, {cc}, {pk})"
def embed_msg(self, m):
return f"Embed({m})"
def unembed_msg(self, m):
return f"Unembed({m})"
``` |
{
"source": "jmgrote215/python_example_programs",
"score": 4
} |
#### File: jmgrote215/python_example_programs/first_largest_try.py
```python
def larger(a,loc=False):
try:
maxval = a[0]
indicii = 0
for i in range(1, len(a)):
if a[i] > maxval:
maxval = a[i]
indicii = i
large = sorted(a)
index = len(a)
if loc == True:
#return large[index-1]
return maxval,indicii
else:
return maxval
except ValueError:
print("ah gee rick")
except TypeError:
print("Type Error son")
b = ['a',5,'d',2,8,'z']
larger(b)
``` |
{
"source": "jmguerreroh/ros2_computer_vision",
"score": 2
} |
#### File: computer_vision/launch/tiago_spawn.launch.py
```python
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
from launch_pal.include_utils import include_launch_py_description
import os
from ament_index_python.packages import get_package_share_directory
import yaml
def generate_launch_description():
# This format doesn't work because we have to expand gzpose into
# different args for spawn_entity.py
# gz_pose = DeclareLaunchArgument(
# 'gzpose', default_value='-x 0 -y 0 -z 0.0 -R 0.0 -P 0.0 -Y 0.0 ',
# description='Spawn gazebo position as provided to spawn_entity.py'
# )
# @TODO: load PID gains? used in gazebo_ros_control fork
# @TODO: load tiago_pal_hardware_gazebo
config = os.path.join(
get_package_share_directory('computer_vision'),
'config',
'params.yaml'
)
with open(config, "r") as stream:
try:
conf = (yaml.safe_load(stream))
except yaml.YAMLError as exc:
print(exc)
model_name = DeclareLaunchArgument(
'model_name', default_value='tiago',
description='Gazebo model name'
)
tiago_state_publisher = include_launch_py_description(
'tiago_description',
['launch', 'robot_state_publisher.launch.py'])
tiago_entity = Node(package='gazebo_ros', executable='spawn_entity.py',
arguments=['-topic', 'robot_description',
'-entity', LaunchConfiguration(
'model_name'),
conf['computer_vision']['tiago_position']['x'],
conf['computer_vision']['tiago_position']['y'],
conf['computer_vision']['tiago_position']['z'],
conf['computer_vision']['tiago_position']['roll'],
conf['computer_vision']['tiago_position']['pitch'],
conf['computer_vision']['tiago_position']['yaw'],
# LaunchConfiguration('gzpose'),
],
output='screen')
return LaunchDescription([
# gz_pose,
model_name,
tiago_state_publisher,
tiago_entity,
])
``` |
{
"source": "jm-gutierrez/CrearSolicitudCronMutode",
"score": 3
} |
#### File: jm-gutierrez/CrearSolicitudCronMutode/Email.py
```python
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import Settings
# set up the SMTP server
class Email:
@staticmethod
def send_email(name, tittle, email):
s = smtplib.SMTP(host=Settings.EMAIL_SMTP_HOST, port=Settings.EMAIL_SMTP_PORT)
s.starttls()
s.login(Settings.EMAIL_USERNAME, Settings.EMAIL_PASSWORD)
msg = MIMEMultipart() # create a message
message = "Gracias por usar la aplicacion " + name + "\n Su prueba" + tittle + " ya fue ejecutada"
msg['From'] = Settings.EMAIL_ADDRESS
msg['To'] = email
msg['Subject'] = "Pruebas Automaticas"
msg.attach(MIMEText(message, 'plain'))
s.send_message(msg)
del msg
s.quit()
``` |
{
"source": "jmguzik/podman-py",
"score": 4
} |
#### File: podman/api/parse_utils.py
```python
import base64
import json
from typing import Any, Dict, MutableMapping, Optional, Tuple
def parse_repository(name: str) -> Tuple[str, Optional[str]]:
"""Parse repository image name from tag or digest
Returns:
item 1: repository name
item 2: Either digest and tag, tag, or None
"""
# split image name and digest
elements = name.split("@", 1)
if len(elements) == 2:
return elements[0], elements[1]
# split repository and image name from tag
elements = name.split(":", 1)
if len(elements) == 2 and "/" not in elements[1]:
return elements[0], elements[1]
return name, None
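# Illustrative examples of the split behaviour:
#   parse_repository("quay.io/fedora:latest")   -> ("quay.io/fedora", "latest")
#   parse_repository("fedora@sha256:9598a1")    -> ("fedora", "sha256:9598a1")
#   parse_repository("localhost:5000/fedora")   -> ("localhost:5000/fedora", None)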
def decode_header(value: Optional[str]) -> Dict[str, Any]:
"""Decode a base64 JSON header value."""
if value is None:
return {}
value = base64.b64decode(value)
text = value.decode("utf-8")
return json.loads(text)
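# Example round trip (illustrative): the header carries base64-encoded JSON, e.g.
#   decode_header(base64.b64encode(b'{"foo": "bar"}').decode())  -> {'foo': 'bar'}
#   decode_header(None)                                          -> {}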
def prepare_body(body: MutableMapping[str, Any]) -> str:
"""Strip out any items without a value."""
if body is None:
return ""
targets = {k: v for (k, v) in body.items() if v is None}
for key in targets:
del body[key]
return json.dumps(body)
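# Example (illustrative): None-valued keys are dropped before serialization,
#   prepare_body({"name": "fedora", "tag": None})  -> '{"name": "fedora"}'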
```
#### File: podman/api/ssh.py
```python
from typing import Any, Mapping, Optional, Union
from urllib.parse import urlparse
from requests.adapters import HTTPAdapter
from requests.packages.urllib3 import HTTPConnectionPool # pylint: disable=import-error
from requests.packages.urllib3.connection import HTTPConnection # pylint: disable=import-error
from requests.packages.urllib3.util import Timeout # pylint: disable=import-error
class SSHConnection(HTTPConnection):
"""Specialization of HTTPConnection to use a ssh tunnel."""
def __init__(
self,
host: str,
timeout: Optional[Union[float, Timeout]] = None,
):
"""Instantiate connection to ssh tunnel for Podman service for HTTP client."""
_ = host
_ = timeout
raise NotImplementedError
def connect(self):
"""Returns socket for ssh tunnel."""
raise NotImplementedError
def __del__(self):
"""Cleanup connection."""
raise NotImplementedError
class SSHConnectionPool(HTTPConnectionPool):
"""Specialization of urllib3 HTTPConnectionPool for ssh tunnels."""
# pylint: disable=too-few-public-methods
def __init__(
self,
host: str,
timeout: Optional[Union[float, Timeout]] = None,
) -> None:
if isinstance(timeout, float):
timeout = Timeout.from_float(timeout)
_ = host
def _new_conn(self) -> SSHConnection:
return SSHConnection(self.host, self.timeout)
class SSHAdapter(HTTPAdapter):
"""Specialization of requests transport adapter for ssh tunnels."""
# Abstract methods (get_connection) are specialized and pylint cannot walk hierarchy.
# pylint: disable=arguments-differ
# pylint: disable=too-few-public-methods
def __init__(self, *args, **kwargs):
self.timeout = None
if "timeout" in kwargs:
self.timeout = kwargs.pop("timeout")
super().__init__(*args, **kwargs)
def get_connection(self, host, proxies: Mapping[str, Any] = None) -> SSHConnectionPool:
"""Returns ssh tunneled connection to Podman service."""
        if proxies and len(proxies) > 0:
uri = urlparse(host)
if uri.scheme in proxies:
raise ValueError(f"{self.__class__.__name__} does not support proxies.")
return SSHConnectionPool(host, timeout=self.timeout)
```
#### File: podman/api/url_utils.py
```python
import json
from typing import Dict, List, Mapping, Optional, Union
def format_filters(filters: Union[str, List[str], Mapping[str, str]]) -> Optional[str]:
"""Returns filters as an URL quoted JSON Dict[str, List[str]]."""
if filters is None or len(filters) == 0:
return None
criteria: Dict[str, List[str]] = {}
if isinstance(filters, str):
_format_string(filters, criteria)
elif isinstance(filters, dict):
_format_dict(filters, criteria)
else:
_format_list(filters, criteria)
if len(criteria) == 0:
return None
return json.dumps(criteria)
def _format_list(filters, criteria):
for item in filters:
if item is None:
continue
key, value = item.split("=", 1)
if key in criteria:
criteria[key].append(value)
else:
criteria[key] = [value]
def _format_dict(filters, criteria):
for key, value in filters.items():
if value is None:
continue
if key in criteria:
criteria[key].append(value)
else:
criteria[key] = [value]
def _format_string(filters, criteria):
key, value = filters.split("=", 1)
criteria[key] = [value]
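# Illustrative inputs and the JSON string returned (the caller URL-encodes it):
#   format_filters("dangling=true")          -> '{"dangling": ["true"]}'
#   format_filters(["label=a", "label=b"])   -> '{"label": ["a", "b"]}'
#   format_filters({"reference": "fedora"})  -> '{"reference": ["fedora"]}'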
```
#### File: podman/domain/containers.py
```python
import io
import json
from typing import Any, Dict, Iterable, Iterator, List, Mapping, Optional, Sequence, Tuple, Union
from podman import api
from podman.domain.images import Image
from podman.domain.images_manager import ImagesManager
from podman.domain.manager import PodmanResource
from podman.errors import APIError, NotFound
class Container(PodmanResource):
"""Details and configuration for a container managed by the Podman service."""
@property
def name(self) -> Optional[str]:
"""Returns container's name."""
if "name" in self.attrs:
return self.attrs["Name"].lstrip("/")
return None
@property
def image(self) -> Image:
"""Returns Image object used to create Container."""
if "Image" in self.attrs:
image_id = self.attrs["Image"].split(":")[1]
return ImagesManager(client=self.client).get(image_id)
return Image()
@property
def labels(self) -> Dict[str, str]:
"""Returns labels associated with container."""
if "Config" in self.attrs and "Labels" in self.attrs["Config"]:
return self.attrs["Config"]["Labels"]
return {}
@property
def status(self) -> str:
"""Returns operational status of container.
Example:
'running', 'stopped', 'exited'
"""
if "State" in self.attrs and "Status" in self.attrs["State"]:
return self.attrs["State"]["Status"]
return "unknown"
@property
def ports(self) -> Dict[str, int]:
"""Returns ports exposed by container."""
if "NetworkSettings" in self.attrs and "Ports" in self.attrs["NetworkSettings"]:
return self.attrs["NetworkSettings"]["Ports"]
return {}
def attach(self, **kwargs) -> Union[str, Iterator[str]]:
"""Attach to container's tty.
Keyword Args:
stdout (bool): Include stdout. Default: True
stderr (bool): Include stderr. Default: True
stream (bool): Return iterator of string(s) vs single string. Default: False
logs (bool): Include previous container output. Default: False
"""
raise NotImplementedError()
def attach_socket(self, **kwargs):
"""TBD."""
raise NotImplementedError()
def commit(self, repository: str = None, tag: str = None, **kwargs) -> Image:
"""Save container to given repository using given parameters.
Args:
repository: Where to save Image
tag: Tag to push with Image
Keyword Args:
author (str): Name of commit author
changes (List[str]): Instructions to apply during commit
comment (List[str]): Instructions to apply while committing in Dockerfile format
conf (Dict[str, Any]): Ignored
format (str): Format of the image manifest and metadata
message (str): Commit message to include with Image
pause (bool): Pause the container before committing it
See https://docs.podman.io/en/latest/_static/api.html#operation/libpodCommitContainer
"""
params = {
"author": kwargs.get("author", None),
"changes": kwargs.get("changes", None),
"comment": kwargs.get("comment", None),
"container": self.id,
"format": kwargs.get("format", None),
"pause": kwargs.get("pause", None),
"repo": repository,
"tag": tag,
}
response = self.client.post("/commit", params=params)
body = response.json()
if response.status_code != 201:
if response.status_code == 404:
raise NotFound(body["cause"], response=response, explanation=body["message"])
raise APIError(body["cause"], response=response, explanation=body["message"])
return ImagesManager(client=self.client).get(body["ID"])
def diff(self) -> List[Dict[str, int]]:
"""Report changes on container's filesystem.
Raises:
APIError when service reports error
"""
response = self.client.get(f"/containers/{self.id}/changes")
body = response.json()
if response.status_code == 200:
return body
if response.status_code == 404:
raise NotFound(body["cause"], response=response, explanation=body["message"])
raise APIError(body["cause"], response=response, explanation=body["message"])
def exec_run(
self,
cmd: Union[str, List[str]],
stdout: bool = True,
stderr: bool = True,
stdin: bool = False,
tty: bool = False,
privileged: bool = False,
user=None,
detach: bool = False,
stream: bool = False,
socket: bool = False,
environment: Union[Mapping[str, str], List[str]] = None,
workdir: str = None,
demux: bool = False,
) -> Tuple[
Optional[int], Union[Iterator[bytes], Any, Tuple[bytes, bytes]]
]: # pylint: disable=too-many-arguments,unused-argument
"""Run given command inside container and return results.
Args:
cmd: Command to be executed
stdout: Attach to stdout. Default: True
stderr: Attach to stderr. Default: True
stdin: Attach to stdin. Default: False
tty: Allocate a pseudo-TTY. Default: False
privileged: Run as privileged.
user: User to execute command as. Default: root
detach: If true, detach from the exec command.
Default: False
stream: Stream response data. Default: False
socket: Return the connection socket to allow custom
read/write operations. Default: False
environment: A dictionary or a List[str] in
the following format ["PASSWORD=<PASSWORD>"] or
{"PASSWORD": "<PASSWORD>"}.
workdir: Path to working directory for this exec session
demux: Return stdout and stderr separately
Returns:
TBD
Raises:
APIError when service reports error
"""
if user is None:
user = "root"
raise NotImplementedError()
def export(self, chunk_size: int = api.DEFAULT_CHUNK_SIZE) -> Iterator[bytes]:
"""Download container's filesystem contents as a tar archive.
Args:
chunk_size: <= number of bytes to return for each iteration of the generator.
Yields:
tarball in size/chunk_size chunks
Raises:
NotFound when container has been removed from service
APIError when service reports an error
"""
response = self.client.get(f"/containers/{self.id}/export", stream=True)
if response.status_code != 200:
body = response.json()
if response.status_code == 404:
raise NotFound(body["cause"], response=response, explanation=body["message"])
raise APIError(body["cause"], response=response, explanation=body["message"])
for out in response.iter_content(chunk_size=chunk_size):
yield out
def get_archive(
self, path: str, chunk_size: int = api.DEFAULT_CHUNK_SIZE
) -> Tuple[Iterable, Dict[str, Any]]:
"""Download a file or folder from the container's filesystem.
Args:
path: Path to file or folder.
chunk_size: <= number of bytes to return for each iteration of the generator.
Returns:
First item is a raw tar data stream.
Second item is a dict containing stat information on the specified path.
"""
response = self.client.get(f"/containers/{self.id}/archive", params={"path": [path]})
if response.status_code != 200:
body = response.json()
if response.status_code == 404:
raise NotFound(body["cause"], response=response, explanation=body["message"])
raise APIError(body["cause"], response=response, explanation=body["message"])
stat = response.headers.get('x-docker-container-path-stat', None)
stat = api.decode_header(stat)
return response.iter_content(chunk_size=chunk_size), stat
def kill(self, signal: Union[str, int, None] = None) -> None:
"""Send signal to container. """
params = {}
        if signal is not None:
params = {"signal": signal}
response = self.client.post(f"/containers/{self.id}/kill", params=params)
if response.status_code == 204:
return
body = response.json()
raise APIError(body["cause"], response=response, explanation=body["message"])
def logs(self, **kwargs) -> Union[str, Iterator[str]]:
"""Get logs from container.
Keyword Args:
stdout (bool): Include stdout. Default: True
stderr (bool): Include stderr. Default: True
stream (bool): Return generator of strings as the response. Default: False
timestamps (bool): Show timestamps in output. Default: False
tail (Union[str, int]): Output specified number of lines at the end of
logs. Either an integer of number of lines or the string all. Default: all
since (Union[datetime, int]): Show logs since a given datetime or
integer epoch (in seconds)
follow (bool): Follow log output. Default: False
until (Union[datetime, int]): Show logs that occurred before the given
datetime or integer epoch (in seconds)
"""
raise NotImplementedError()
def pause(self) -> None:
"""Pause processes within container."""
response = self.client.post(f"/containers/{self.id}/pause")
if response.status_code == 204:
return
body = response.json()
if response.status_code == 404:
raise NotFound(body["cause"], response=response, explanation=body["message"])
raise APIError(body["cause"], response=response, explanation=body["message"])
def put_archive(self, path: Union[bytes, str, None] = None, data: bytes = None) -> bool:
"""Upload tar archive containing a file or folder to be written into container.
Args:
path: File to write data into
data: Contents to write to file
Returns:
True when successful
Raises:
APIError when server reports error
Notes:
- path must exist.
"""
if not path or not data:
raise ValueError("path and data (tar archive) are required parameters.")
response = self.client.put(
f"/containers/{self.id}/archive", params={"path": path}, data=data
)
return response.status_code == 200
def remove(self, **kwargs) -> None:
"""Delete container.
Keyword Args:
v (bool): Delete associated volumes as well.
link (bool): Ignored.
force (bool): Kill a running container before deleting.
"""
params = {}
if "v" in kwargs:
params["v"] = kwargs["v"]
if "force" in kwargs:
params["force"] = kwargs["force"]
response = self.client.delete(f"/containers/{self.id}", params=params)
if response.status_code == 204:
return
body = response.json()
raise APIError(body["cause"], response=response, explanation=body["message"])
def rename(self, name: Optional[str] = None) -> None:
"""Rename container.
Args:
name: New name for container.
"""
if not name:
raise ValueError("name is a required parameter.")
response = self.client.post(f"/containers/{self.id}/rename", params={"name": name})
if response.status_code == 204:
self.attrs["Name"] = name
return
body = response.json()
raise APIError(body["cause"], response=response, explanation=body["message"])
def resize(self, height: int = None, width: int = None) -> None:
"""Resize the tty session.
Args:
height: New height of tty session.
width: New width of tty session.
"""
params = {
"h": height,
"w": width,
}
response = self.client.post(f"/containers/{self.id}/resize", params=params)
if response.status_code == 200:
return
body = response.json()
if response.status_code == 404:
raise NotFound(body["cause"], response=response, explanation=body["message"])
raise APIError(body["cause"], response=response, explanation=body["message"])
def restart(self, **kwargs) -> None:
"""Restart processes in container.
Keyword Args:
timeout (int): Seconds to wait for container to stop before killing container.
"""
connection_timeout = api.DEFAULT_TIMEOUT
params = {}
if "timeout" in kwargs:
params = {"timeout": kwargs["timeout"]}
connection_timeout += float(kwargs["timeout"])
response = self.client.post(
f"/containers/{self.id}/restart", params=params, timeout=connection_timeout
)
if response.status_code == 204:
return
body = response.json()
raise APIError(body["cause"], response=response, explanation=body["message"])
def start(self, **kwargs) -> None:
"""Start processes in container.
Keyword Args:
detach_keys: Override the key sequence for detaching a container (Podman only)
"""
params = {}
if "detach_keys" in kwargs:
params = {"detachKeys": kwargs["detach_keys"]}
response = self.client.post(f"/containers/{self.id}/start", params=params)
if response.status_code == 204:
return
body = response.json()
raise APIError(body["cause"], response=response, explanation=body["message"])
def stats(self, **kwargs) -> Union[Sequence[Dict[str, bytes]], bytes]:
"""Return statistics for container.
Keyword Args:
decode (bool): If True and stream is True, stream will be decoded into dict's.
Default: False.
stream (bool): Stream statistics until cancelled. Default: True.
Raises:
APIError when service reports an error
"""
# FIXME Errors in stream are not handled, need content and json to read Errors.
stream = kwargs.get("stream", True)
decode = kwargs.get("decode", False)
params = {
"containers": self.id,
"stream": stream,
}
response = self.client.get("/containers/stats", params=params)
if response.status_code != 200:
body = response.json()
if response.status_code == 404:
raise NotFound(body["cause"], response=response, explanation=body["message"])
raise APIError(body["cause"], response=response, explanation=body["message"])
if stream:
return self._stats_helper(decode, response.iter_lines())
with io.StringIO() as buffer:
            # without streaming the service is assumed to return a JSON array of entries
            for entry in response.json():
                buffer.write(json.dumps(entry) + "\n")
return buffer.getvalue()
@staticmethod
def _stats_helper(
decode: bool, body: List[Dict[str, Any]]
) -> Iterator[Union[str, Dict[str, Any]]]:
"""Helper needed to allow stats() to return either a generator or a str."""
for entry in body:
if decode:
yield json.loads(entry)
else:
yield entry
def stop(self, **kwargs):
"""Stop container.
Keyword Args:
all (bool): When True, stop all containers. Default: False (Podman only)
ignore (bool): When True, ignore error if container already stopped (Podman only)
timeout (int): Number of seconds to wait on container to stop before killing it.
"""
connection_timeout = api.DEFAULT_TIMEOUT
params = {}
if "all" in kwargs:
params["all"] = kwargs["all"]
if "timeout" in kwargs:
params["timeout"] = kwargs["timeout"]
connection_timeout += float(kwargs["timeout"])
response = self.client.post(
f"/containers/{self.id}/stop", params=params, timeout=connection_timeout
)
if response.status_code == 204:
return
if response.status_code == 304:
if kwargs.get("ignore", False):
return
body = response.json()
raise APIError(body["cause"], response=response, explanation=body["message"])
def top(self, **kwargs) -> Dict[str, Any]:
"""Report on running processes in container.
Keyword Args:
ps_args (str): Optional arguments passed to ps
"""
params = {
"ps_args": kwargs.get("ps_args", None),
"stream": kwargs.get("stream", None),
}
response = self.client.get(f"/containers/{self.id}/top", params=params)
body = response.json()
if response.status_code != 200:
if response.status_code == 404:
raise NotFound(body["cause"], response=response, explanation=body["message"])
raise APIError(body["cause"], response=response, explanation=body["message"])
return body
def unpause(self) -> None:
"""Unpause processes in container."""
response = self.client.post(f"/containers/{self.id}/unpause")
if response.status_code == 204:
return
body = response.json()
raise APIError(body["cause"], response=response, explanation=body["message"])
def update(self, **kwargs):
"""Update resource configuration of the containers.
Note:
Podman unsupported operation
"""
raise NotImplementedError("container update is not supported by Podman.")
def wait(self, **kwargs) -> Dict[str, Any]:
"""Block until container enters given state.
Keyword Args:
condition (str): Container state on which to release, values:
not-running (default), next-exit or removed.
timeout (int): Number of seconds to wait for container to stop.
Returns:
API response as a dict, including the container's exit code under the key StatusCode.
Raises:
ReadTimeoutError: If the timeout is exceeded.
APIError: If the service returns as error.
"""
params = {"condition": kwargs.get("condition", None)}
response = self.client.post(f"/containers/{self.id}/wait", params=params)
if response.status_code == 204:
return
body = response.json()
raise APIError(body["cause"], response=response, explanation=body["message"])
```
#### File: podman/errors/exceptions.py
```python
from typing import Iterable, Optional
from requests import Response
from requests.exceptions import HTTPError
class APIError(HTTPError):
"""A wrapper for HTTP errors from the API."""
def __init__(
self, message: str, response: Optional[Response] = None, explanation: Optional[str] = None
):
super().__init__(message, response=response)
self.explanation = explanation
def __str__(self):
msg = super().__str__()
if self.response is not None:
msg = self.response.reason
if self.is_client_error():
msg = f"{self.status_code} Client Error: {msg}"
elif self.is_server_error():
msg = f"{self.status_code} Server Error: {msg}"
if self.explanation:
msg = f"{msg} ({self.explanation})"
return msg
@property
def status_code(self) -> Optional[int]:
"""HTTP status code from response."""
if self.response is not None:
return self.response.status_code
return None
def is_error(self) -> bool:
"""Returns True if an HTTP occurred."""
return self.is_client_error() or self.is_server_error()
def is_client_error(self) -> bool:
"""Returns True if error occurred in request."""
return 400 <= (self.status_code or 0) < 500
def is_server_error(self) -> bool:
"""Returns True if error occurred in service."""
return 500 <= (self.status_code or 0) < 600
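# Sketch of how the classification helpers behave (a Response object is built by
# hand here purely for illustration):
#   resp = Response()
#   resp.status_code = 404
#   err = APIError("no such image", response=resp, explanation="image not known")
#   err.is_client_error()  # -> True
#   err.is_server_error()  # -> False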
class NotFound(APIError):
"""Resource not found on Podman service.
Notes:
Compatible name, missing Error suffix.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class ImageNotFound(APIError):
"""Image not found on Podman service.
Notes:
Compatible name, missing Error suffix.
"""
class DockerException(Exception):
"""Base class for exception hierarchy.
Notes:
* Provided for compatibility.
"""
class PodmanError(DockerException):
"""Base class for PodmanPy exceptions."""
class BuildError(PodmanError):
"""Error occurred during build operation."""
def __init__(self, reason: str, build_log: Iterable[str]) -> None:
"""Create BuildError.
Args:
reason: describes the error
build_log: build log output
"""
super().__init__(reason)
self.msg = reason
self.build_log = build_log
class InvalidArgument(PodmanError):
"""Parameter to method/function was not valid."""
```
#### File: tests/unit/test_image.py
```python
import io
import unittest
import requests_mock
from podman import PodmanClient
from podman.domain.images_manager import ImagesManager
FIRST_IMAGE = {
"Id": "326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab",
"ParentId": "",
"RepoTags": ["fedora:latest", "fedora:33", "<none>:<none>"],
"RepoDigests": [
"fedora@sha256:9598a10fa72b402db876ccd4b3d240a4061c7d1e442745f1896ba37e1bf38664"
],
"Created": 1614033320,
"Size": 23855104,
"VirtualSize": 23855104,
"SharedSize": 0,
"Labels": {},
"Containers": 2,
}
SECOND_IMAGE = {
"Id": "c4b16966ecd94ffa910eab4e630e24f259bf34a87e924cd4b1434f267b0e354e",
"ParentId": "",
"RepoDigests": [
"fedora@sha256:4a877de302c6463cb624ddfe146ad850413724462ec24847832aa6eb1e957746"
],
"Created": 1614033320,
"Size": 23855104,
"VirtualSize": 23855104,
"SharedSize": 0,
"Containers": 0,
}
class TestClientImages(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
self.client = PodmanClient(base_url="http+unix://localhost:9999")
def tearDown(self) -> None:
super().tearDown()
self.client.close()
def test_podmanclient(self):
manager = self.client.images
self.assertIsInstance(manager, ImagesManager)
@requests_mock.Mocker()
def test_history(self, mock):
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images"
"/326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab/history",
json=[
{
"Id": "326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab",
"Comment": "",
"Created": 1614208404,
"CreatedBy": "2021-02-24T23:13:24+00:00",
"Tags": ["latest"],
"Size": 1024,
}
],
)
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images"
"/326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab/json",
json=FIRST_IMAGE,
)
image = self.client.images.get(
"326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab"
)
history = image.history()
self.assertEqual(history[0]["Id"], image.id)
@requests_mock.Mocker()
def test_reload(self, mock):
update = FIRST_IMAGE.copy()
update["Containers"] = 0
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images"
"/326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab/json",
[
{"json": FIRST_IMAGE},
{"json": update},
],
)
image = self.client.images.get(
"326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab"
)
self.assertEqual(image.attrs["Containers"], 2)
image.reload()
self.assertEqual(image.attrs["Containers"], 0)
@requests_mock.Mocker()
def test_save(self, mock):
tarball = b'Yet another weird tarball...'
body = io.BytesIO(tarball)
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images"
"/326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab/json",
json=FIRST_IMAGE,
)
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images/"
"326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab/get",
body=body,
)
image = self.client.images.get(
"326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab"
)
with io.BytesIO() as fd:
for chunk in image.save():
fd.write(chunk)
self.assertEqual(fd.getbuffer(), tarball)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/unit/test_imagesmanager.py
```python
import types
import unittest
from collections.abc import Iterable
import requests_mock
from podman import PodmanClient
from podman.domain.images_manager import ImagesManager
from podman.domain.images import Image
from podman.errors.exceptions import APIError, ImageNotFound
FIRST_IMAGE = {
"Id": "sha256:326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab",
"ParentId": "",
"RepoTags": ["fedora:latest", "fedora:33", "<none>:<none>"],
"RepoDigests": [
"fedora@sha256:9598a10fa72b402db876ccd4b3d240a4061c7d1e442745f1896ba37e1bf38664"
],
"Created": 1614033320,
"Size": 23855104,
"VirtualSize": 23855104,
"SharedSize": 0,
"Labels": {
"license": " Apache-2.0"
},
"Containers": 2,
}
SECOND_IMAGE = {
"Id": "c4b16966ecd94ffa910eab4e630e24f259bf34a87e924cd4b1434f267b0e354e",
"ParentId": "",
"RepoDigests": [
"fedora@sha256:4a877de302c6463cb624ddfe146ad850413724462ec24847832aa6eb1e957746"
],
"Created": 1614033320,
"Size": 23855104,
"VirtualSize": 23855104,
"SharedSize": 0,
"Containers": 0,
}
class TestClientImagesManager(unittest.TestCase):
"""Test ImagesManager area of concern.
Note:
Mock responses need to be coded for libpod returns. The python bindings are responsible
for mapping to compatible output.
"""
def setUp(self) -> None:
super().setUp()
self.client = PodmanClient(base_url="http+unix://localhost:9999")
def tearDown(self) -> None:
super().tearDown()
self.client.close()
def test_podmanclient(self):
manager = self.client.images
self.assertIsInstance(manager, ImagesManager)
@requests_mock.Mocker()
def test_list_empty(self, mock):
"""Unit test Images list()."""
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images/json",
text="[]",
)
images = self.client.images.list()
self.assertEqual(len(images), 0)
@requests_mock.Mocker()
def test_list_1(self, mock):
"""Unit test Images list()."""
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images/json",
json=[FIRST_IMAGE],
)
images = self.client.images.list()
self.assertEqual(len(images), 1)
self.assertIsInstance(images[0], Image)
self.assertEqual(str(images[0]), "<Image: 'fedora:latest', 'fedora:33'>")
self.assertEqual(
images[0].id, "sha256:326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab"
)
self.assertIsInstance(images[0].labels, dict)
self.assertEqual(len(images[0].labels), 1)
self.assertEqual(images[0].short_id, "sha256:326dd9d7ad")
self.assertIsInstance(images[0].tags, list)
self.assertEqual(len(images[0].tags), 2)
@requests_mock.Mocker()
def test_list_2(self, mock):
"""Unit test Images list()."""
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images/json",
json=[FIRST_IMAGE, SECOND_IMAGE],
)
images = self.client.images.list()
self.assertEqual(len(images), 2)
self.assertIsInstance(images[0], Image)
self.assertIsInstance(images[1], Image)
self.assertEqual(images[1].short_id, "c4b16966ec")
self.assertIsInstance(images[1].labels, dict)
self.assertEqual(len(images[1].labels), 0)
self.assertIsInstance(images[1].tags, list)
self.assertEqual(len(images[1].tags), 0)
@requests_mock.Mocker()
def test_list_filters(self, mock):
"""Unit test filters param for Images list()."""
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images/json"
"?filters=%7B%22dangling%22%3A+%5Btrue%5D%7D",
json=[FIRST_IMAGE],
)
images = self.client.images.list(filters={"dangling": True})
self.assertEqual(
images[0].id, "sha256:326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab"
)
@requests_mock.Mocker()
def test_list_all(self, mock):
"""Unit test filters param for Images list()."""
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images/json?all=true",
json=[FIRST_IMAGE],
)
images = self.client.images.list(all=True)
self.assertEqual(
images[0].id, "sha256:326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab"
)
@requests_mock.Mocker()
def test_prune(self, mock):
"""Unit test Images prune()."""
mock.post(
"http+unix://localhost:9999/v3.0.0/libpod/images/prune",
json=[
{
"Id": "326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab",
"Err": None,
"Size": 1024,
}
],
)
results = self.client.images.prune()
self.assertIn("ImagesDeleted", results)
self.assertIn("SpaceReclaimed", results)
self.assertEqual(
results["ImagesDeleted"][0]["Deleted"],
"326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab",
)
self.assertEqual(results["SpaceReclaimed"], 1024)
@requests_mock.Mocker()
def test_prune_filters(self, mock):
"""Unit test filters param for Images prune()."""
mock.post(
"http+unix://localhost:9999/v3.0.0/libpod/images/prune"
"?filters=%7B%22dangling%22%3A+%5Btrue%5D%7D",
json=[
{
"Id": "326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab",
"Size": 1024,
},
{
"Id": "c4b16966ecd94ffa910eab4e630e24f259bf34a87e924cd4b1434f267b0e354e",
"Size": 1024,
},
],
)
report = self.client.images.prune(filters={"dangling": True})
self.assertIn("ImagesDeleted", report)
self.assertIn("SpaceReclaimed", report)
self.assertEqual(report["SpaceReclaimed"], 2048)
deleted = [r["Deleted"] for r in report["ImagesDeleted"] if "Deleted" in r]
self.assertEqual(len(deleted), 2)
self.assertIn("326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab", deleted)
self.assertGreater(len("".join(deleted)), 0)
untagged = [r["Untagged"] for r in report["ImagesDeleted"] if "Untagged" in r]
self.assertEqual(len(untagged), 2)
self.assertEqual(len("".join(untagged)), 0)
@requests_mock.Mocker()
def test_prune_failure(self, mock):
"""Unit test to report error carried in response body."""
mock.post(
"http+unix://localhost:9999/v3.0.0/libpod/images/prune",
json=[
{
"Err": "Test prune failure in response body.",
}
],
)
with self.assertRaises(APIError) as e:
self.client.images.prune()
self.assertEqual(e.exception.explanation, "Test prune failure in response body.")
@requests_mock.Mocker()
def test_get(self, mock):
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images/fedora%3Alatest/json",
json=FIRST_IMAGE,
)
image = self.client.images.get("fedora:latest")
self.assertIsInstance(image, Image)
self.assertDictEqual(FIRST_IMAGE["Labels"], image.attrs["Labels"])
@requests_mock.Mocker()
def test_get_oserror(self, mock):
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images/bad_image/json",
exc=OSError,
)
with self.assertRaises(APIError) as e:
_ = self.client.images.get("bad_image")
self.assertEqual(str(e.exception), "/images/bad_image/json")
@requests_mock.Mocker()
def test_get_404(self, mock):
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images/bad_image/json",
status_code=404,
json={
"cause": "Image not found",
"message": "Image not found",
"response": 404,
},
)
with self.assertRaises(ImageNotFound):
_ = self.client.images.get("bad_image")
@requests_mock.Mocker()
def test_get_500(self, mock):
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images/bad_image/json",
status_code=500,
json={
"cause": "Server error",
"message": "Server error",
"response": 500,
},
)
with self.assertRaises(APIError):
_ = self.client.images.get("bad_image")
@requests_mock.Mocker()
def test_remove(self, mock):
mock.delete(
"http+unix://localhost:9999/v3.0.0/libpod/images/fedora:latest",
json={
"Untagged": ["326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab"],
"Deleted": [
"326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab",
"c4b16966ecd94ffa910eab4e630e24f259bf34a87e924cd4b1434f267b0e354e",
],
"Errors": [],
"ExitCode": 0,
},
)
report = self.client.images.remove("fedora:latest")
self.assertEqual(len(report), 4)
deleted = [r["Deleted"] for r in report if "Deleted" in r]
self.assertEqual(len(deleted), 2)
untagged = [r["Untagged"] for r in report if "Untagged" in r]
self.assertEqual(len(untagged), 1)
errors = [r["Errors"] for r in report if "Errors" in r]
self.assertEqual(len(errors), 0)
codes = [r["ExitCode"] for r in report if "ExitCode" in r]
self.assertEqual(len(codes), 1)
self.assertEqual(codes[0], 0)
@requests_mock.Mocker()
def test_load(self, mock):
mock.post(
"http+unix://localhost:9999/v3.0.0/libpod/images/load",
json={"Names": ["quay.io/fedora:latest"]},
)
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images/quay.io%2Ffedora%3Alatest/json",
json=FIRST_IMAGE,
)
gntr = self.client.images.load(b'This is a weird tarball...')
self.assertIsInstance(gntr, types.GeneratorType)
report = list(gntr)
self.assertEqual(len(report), 1)
self.assertEqual(
report[0].id, "sha256:326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab"
)
@requests_mock.Mocker()
def test_search(self, mock):
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images/search?term=fedora&noTrunc=true",
json=[
{
"description": "mock term=fedora search",
"is_official": False,
"is_automated": False,
"name": "quay.io/libpod/fedora",
"star_count": 0,
},
],
)
report = self.client.images.search("fedora")
self.assertEqual(len(report), 1)
self.assertEqual(report[0]["name"], "quay.io/libpod/fedora")
@requests_mock.Mocker()
def test_search_oserror(self, mock):
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images/search?term=fedora&noTrunc=true",
exc=OSError,
)
with self.assertRaises(OSError):
self.client.images.search("fedora")
@requests_mock.Mocker()
def test_search_500(self, mock):
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images/search?term=fedora&noTrunc=true",
status_code=500,
json={
"cause": "Server error",
"message": "Server error",
"response": 500,
},
)
with self.assertRaises(OSError):
self.client.images.search("fedora")
@requests_mock.Mocker()
def test_search_limit(self, mock):
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images/search?term=fedora&noTrunc=true&limit=5",
json=[
{
"description": "mock term=fedora search",
"is_official": False,
"is_automated": False,
"name": "quay.io/libpod/fedora",
"star_count": 0,
},
],
)
report = self.client.images.search("fedora", limit=5)
self.assertEqual(len(report), 1)
self.assertEqual(report[0]["name"], "quay.io/libpod/fedora")
@requests_mock.Mocker()
def test_search_filters(self, mock):
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images/search"
"?term=fedora&noTrunc=true&filters=%7B%22stars%22%3A+%5B5%5D%7D",
json=[
{
"description": "mock term=fedora search",
"is_official": False,
"is_automated": False,
"name": "quay.io/libpod/fedora",
"star_count": 0,
},
],
)
report = self.client.images.search("fedora", filters={"stars": 5})
self.assertEqual(len(report), 1)
self.assertEqual(report[0]["name"], "quay.io/libpod/fedora")
@requests_mock.Mocker()
def test_push(self, mock):
mock.post("http+unix://localhost:9999/v3.0.0/libpod/images/quay.io%2Ffedora/push")
report = self.client.images.push("quay.io/fedora", "latest")
expected = r"""{"status": "Pushing repository quay.io/fedora (1 tags)"}
{"status": "Pushing", "progressDetail": {}, "id": "quay.io/fedora"}
"""
self.assertEqual(report, expected)
@requests_mock.Mocker()
def test_pull(self, mock):
image_id = "sha256:326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab"
mock.post(
"http+unix://localhost:9999/v3.0.0/libpod/images/pull"
"?reference=quay.io%2Ffedora%3Alatest",
json={
"error": "",
"id": image_id,
"images": [image_id],
"stream": "",
},
)
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images"
"/sha256%3A326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab/json",
json=FIRST_IMAGE,
)
image = self.client.images.pull("quay.io/fedora", "latest")
self.assertEqual(image.id, image_id)
@requests_mock.Mocker()
def test_pull_2x(self, mock):
image_id = "sha256:326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab"
mock.post(
"http+unix://localhost:9999/v3.0.0/libpod/images/pull"
"?reference=quay.io%2Ffedora&allTags=True",
json={
"error": "",
"id": image_id,
"images": [
image_id,
"c4b16966ecd94ffa910eab4e630e24f259bf34a87e924cd4b1434f267b0e354e",
],
"stream": "",
},
)
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images"
"/sha256%3A326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab/json",
json=FIRST_IMAGE,
)
mock.get(
"http+unix://localhost:9999/v3.0.0/libpod/images"
"/c4b16966ecd94ffa910eab4e630e24f259bf34a87e924cd4b1434f267b0e354e/json",
json=SECOND_IMAGE,
)
images = self.client.images.pull("quay.io/fedora", "latest", all_tags=True)
self.assertIsInstance(images, Iterable)
self.assertIsInstance(images[0], Image)
self.assertIsInstance(images[1], Image)
self.assertEqual(images[0].id, image_id)
self.assertEqual(
images[1].id, "c4b16966ecd94ffa910eab4e630e24f259bf34a87e924cd4b1434f267b0e354e"
)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jmguzmanc/trex-core",
"score": 2
} |
#### File: emu/emu_plugins/emu_plugin_dot1x.py
```python
from trex.emu.api import *
from trex.emu.emu_plugins.emu_plugin_base import *
from trex.emu.trex_emu_validator import EMUValidator
from trex.emu.trex_emu_conversions import Mac
import trex.utils.parsing_opts as parsing_opts
class DOT1XPlugin(EMUPluginBase):
    '''Defines DOT1X plugin, an IEEE 802.1X (EAPOL) client with
EAP-MD5
EAP-MSCHAPv2
'''
plugin_name = 'DOT1X'
# init jsons example for SDK
INIT_JSON_NS = {}
"""
:parameters:
Empty.
"""
INIT_JSON_CLIENT = {'dot1x': {'user': 'username', 'password': '<PASSWORD>', 'nthash': '<PASSWORD>', 'timeo_idle': 2, 'max_start': 2}}
"""
:parameters:
user: string
User name.
password: string
password
nthash: string
Hash string for MSCHAPv2
timeo_idle: uint32
timeout for success in sec
max_start: uint32
max number of retries
"""
DOT1X_STATES = {
1: 'EAP_WAIT_FOR_IDENTITY',
2: 'EAP_WAIT_FOR_METHOD',
3: 'EAP_WAIT_FOR_RESULTS',
4: 'EAP_DONE_OK',
5: 'EAP_DONE_FAIL'
}
DOT1X_METHOD_STATES = {
1: 'METHOD_DONE',
5: 'METHOD_INIT',
6: 'METHOD_CONT',
7: 'METHOD_MAY_CONT'
}
def __init__(self, emu_client):
super(DOT1XPlugin, self).__init__(emu_client, 'dot1x_client_cnt')
@client_api('command', True)
def get_clients_info(self, c_keys):
"""
Get dot1x clients information.
:parameters:
c_keys: list of EMUClientKey
see :class:`trex.emu.trex_emu_profile.EMUClientKey`
:return:
| list: List of clients information
| [{'state': string, 'method': string, 'eap_version': int}, {..}]
"""
ver_args = [{'name': 'c_keys', 'arg': c_keys, 't': EMUClientKey, 'allow_list': True},]
EMUValidator.verify(ver_args)
c_keys = listify(c_keys)
res = self.emu_c._send_plugin_cmd_to_clients('dot1x_client_info', c_keys)
for r in res:
if 'state' in r:
r['state'] = DOT1XPlugin.DOT1X_STATES.get(r['state'], 'Unknown state')
if 'method' in r:
r['method'] = DOT1XPlugin.DOT1X_METHOD_STATES.get(r['method'], 'Unknown state')
return res
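    # Usage sketch (illustrative; assumes `plugin` is the DOT1XPlugin instance that
    # the EMU client exposes, and mirrors dot1x_get_clients_info_line below):
    #   ns_key = EMUNamespaceKey(port, vlan, tpid)
    #   c_key = EMUClientKey(ns_key, Mac('00:00:00:70:00:01').V())
    #   info = plugin.get_clients_info([c_key])
    #   # e.g. [{'state': 'EAP_DONE_OK', 'method': 'METHOD_DONE', 'eap_version': 1}]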
# Plugins methods
@plugin_api('dot1x_show_counters', 'emu')
def dot1x_show_counters_line(self, line):
'''Show dot1x counters (per client).\n'''
parser = parsing_opts.gen_parser(self,
"dot1x_show_counters",
self.dot1x_show_counters_line.__doc__,
parsing_opts.EMU_SHOW_CNT_GROUP,
parsing_opts.EMU_NS_GROUP,
parsing_opts.EMU_CLIENT_GROUP,
parsing_opts.EMU_DUMPS_OPT
)
opts = parser.parse_args(line.split())
self.emu_c._base_show_counters(self.data_c, opts, req_ns = True)
return True
@plugin_api('dot1x_get_clients_info', 'emu')
def dot1x_get_clients_info_line(self, line):
        '''Show dot1x clients information.\n'''
parser = parsing_opts.gen_parser(self,
"dot1x_get_clients_info",
self.dot1x_get_clients_info_line.__doc__,
parsing_opts.EMU_NS_GROUP,
parsing_opts.MAC_ADDRESSES,
)
opts = parser.parse_args(line.split())
ns_key = EMUNamespaceKey(opts.port, opts.vlan, opts.tpid)
c_keys = []
for mac in opts.macs:
mac = Mac(mac)
c_keys.append(EMUClientKey(ns_key, mac.V()))
res = self.get_clients_info(c_keys)
keys_to_headers = [
{'key': 'state', 'header': 'State'},
{'key': 'method', 'header': 'Method'},
{'key': 'eap_version', 'header': 'EAP Version'},
]
args = {'title': 'Dot1x Clients Information', 'empty_msg': 'No Dot1x information for those clients', 'keys_to_headers': keys_to_headers}
self.print_table_by_keys(data = res, **args)
return True
``` |
{
"source": "jmh07/dissect",
"score": 2
} |
#### File: netdissect/upsegmodel/models.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from . import resnet, resnext
try:
from lib.nn import SynchronizedBatchNorm2d
except ImportError:
from torch.nn import BatchNorm2d as SynchronizedBatchNorm2d
class SegmentationModuleBase(nn.Module):
def __init__(self):
super(SegmentationModuleBase, self).__init__()
@staticmethod
def pixel_acc(pred, label, ignore_index=-1):
_, preds = torch.max(pred, dim=1)
valid = (label != ignore_index).long()
acc_sum = torch.sum(valid * (preds == label).long())
pixel_sum = torch.sum(valid)
acc = acc_sum.float() / (pixel_sum.float() + 1e-10)
return acc
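    # Worked example (illustrative): with logits of shape (N, C, H, W), pixel_acc
    # takes the argmax class per pixel, compares it to the label map and skips
    # pixels labelled ignore_index. For a 1x2x1x2 prediction whose argmax is
    # [0, 1] and labels [[0, -1]], only the first pixel counts, so accuracy ~ 1.0:
    #   pred  = torch.tensor([[[[2.0, 0.1]], [[0.5, 3.0]]]])  # shape (1, 2, 1, 2)
    #   label = torch.tensor([[[0, -1]]])                     # shape (1, 1, 2)
    #   SegmentationModuleBase.pixel_acc(pred, label)         # -> ~1.0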
@staticmethod
def part_pixel_acc(pred_part, gt_seg_part, gt_seg_object, object_label, valid):
mask_object = (gt_seg_object == object_label)
_, pred = torch.max(pred_part, dim=1)
acc_sum = mask_object * (pred == gt_seg_part)
acc_sum = torch.sum(acc_sum.view(acc_sum.size(0), -1), dim=1)
acc_sum = torch.sum(acc_sum * valid)
pixel_sum = torch.sum(mask_object.view(mask_object.size(0), -1), dim=1)
pixel_sum = torch.sum(pixel_sum * valid)
return acc_sum, pixel_sum
@staticmethod
def part_loss(pred_part, gt_seg_part, gt_seg_object, object_label, valid):
mask_object = (gt_seg_object == object_label)
loss = F.nll_loss(pred_part, gt_seg_part * mask_object.long(), reduction='none')
loss = loss * mask_object.float()
loss = torch.sum(loss.view(loss.size(0), -1), dim=1)
nr_pixel = torch.sum(mask_object.view(mask_object.shape[0], -1), dim=1)
sum_pixel = (nr_pixel * valid).sum()
loss = (loss * valid.float()).sum() / torch.clamp(sum_pixel, 1).float()
return loss
class SegmentationModule(SegmentationModuleBase):
def __init__(self, net_enc, net_dec, labeldata, loss_scale=None):
super(SegmentationModule, self).__init__()
self.encoder = net_enc
self.decoder = net_dec
self.crit_dict = nn.ModuleDict()
if loss_scale is None:
self.loss_scale = {"object": 1, "part": 0.5, "scene": 0.25, "material": 1}
else:
self.loss_scale = loss_scale
# criterion
self.crit_dict["object"] = nn.NLLLoss(ignore_index=0) # ignore background 0
self.crit_dict["material"] = nn.NLLLoss(ignore_index=0) # ignore background 0
self.crit_dict["scene"] = nn.NLLLoss(ignore_index=-1) # ignore unlabelled -1
# Label data - read from json
self.labeldata = labeldata
object_to_num = {k: v for v, k in enumerate(labeldata['object'])}
part_to_num = {k: v for v, k in enumerate(labeldata['part'])}
self.object_part = {object_to_num[k]:
[part_to_num[p] for p in v]
for k, v in labeldata['object_part'].items()}
self.object_with_part = sorted(self.object_part.keys())
self.decoder.object_part = self.object_part
self.decoder.object_with_part = self.object_with_part
def forward(self, feed_dict, *, seg_size=None):
if seg_size is None: # training
if feed_dict['source_idx'] == 0:
output_switch = {"object": True, "part": True, "scene": True, "material": False}
elif feed_dict['source_idx'] == 1:
output_switch = {"object": False, "part": False, "scene": False, "material": True}
else:
raise ValueError
pred = self.decoder(
self.encoder(feed_dict['img'], return_feature_maps=True),
output_switch=output_switch
)
# loss
loss_dict = {}
if pred['object'] is not None: # object
loss_dict['object'] = self.crit_dict['object'](pred['object'], feed_dict['seg_object'])
if pred['part'] is not None: # part
part_loss = 0
for idx_part, object_label in enumerate(self.object_with_part):
part_loss += self.part_loss(
pred['part'][idx_part], feed_dict['seg_part'],
feed_dict['seg_object'], object_label, feed_dict['valid_part'][:, idx_part])
loss_dict['part'] = part_loss
if pred['scene'] is not None: # scene
loss_dict['scene'] = self.crit_dict['scene'](pred['scene'], feed_dict['scene_label'])
if pred['material'] is not None: # material
loss_dict['material'] = self.crit_dict['material'](pred['material'], feed_dict['seg_material'])
loss_dict['total'] = sum([loss_dict[k] * self.loss_scale[k] for k in loss_dict.keys()])
# metric
metric_dict= {}
if pred['object'] is not None:
metric_dict['object'] = self.pixel_acc(
pred['object'], feed_dict['seg_object'], ignore_index=0)
if pred['material'] is not None:
metric_dict['material'] = self.pixel_acc(
pred['material'], feed_dict['seg_material'], ignore_index=0)
if pred['part'] is not None:
acc_sum, pixel_sum = 0, 0
for idx_part, object_label in enumerate(self.object_with_part):
acc, pixel = self.part_pixel_acc(
pred['part'][idx_part], feed_dict['seg_part'], feed_dict['seg_object'],
object_label, feed_dict['valid_part'][:, idx_part])
acc_sum += acc
pixel_sum += pixel
metric_dict['part'] = acc_sum.float() / (pixel_sum.float() + 1e-10)
if pred['scene'] is not None:
metric_dict['scene'] = self.pixel_acc(
pred['scene'], feed_dict['scene_label'], ignore_index=-1)
return {'metric': metric_dict, 'loss': loss_dict}
else: # inference
output_switch = {"object": True, "part": True, "scene": True, "material": True}
pred = self.decoder(self.encoder(feed_dict['img'], return_feature_maps=True),
output_switch=output_switch, seg_size=seg_size)
return pred
def conv3x3(in_planes, out_planes, stride=1, has_bias=False):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=has_bias)
def conv3x3_bn_relu(in_planes, out_planes, stride=1):
return nn.Sequential(
conv3x3(in_planes, out_planes, stride),
SynchronizedBatchNorm2d(out_planes),
nn.ReLU(inplace=True),
)
class ModelBuilder:
def __init__(self):
pass
# custom weights initialization
@staticmethod
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1.)
m.bias.data.fill_(1e-4)
#elif classname.find('Linear') != -1:
# m.weight.data.normal_(0.0, 0.0001)
def build_encoder(self, arch='resnet50_dilated8', fc_dim=512, weights=''):
pretrained = True if len(weights) == 0 else False
if arch == 'resnet50':
orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
net_encoder = Resnet(orig_resnet)
elif arch == 'resnet101':
orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained)
net_encoder = Resnet(orig_resnet)
elif arch == 'resnext101':
orig_resnext = resnext.__dict__['resnext101'](pretrained=pretrained)
net_encoder = Resnet(orig_resnext) # we can still use class Resnet
else:
raise Exception('Architecture undefined!')
# net_encoder.apply(self.weights_init)
if len(weights) > 0:
# print('Loading weights for net_encoder')
net_encoder.load_state_dict(
torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
return net_encoder
def build_decoder(self, nr_classes,
arch='ppm_bilinear_deepsup', fc_dim=512,
weights='', use_softmax=False):
if arch == 'upernet_lite':
net_decoder = UPerNet(
nr_classes=nr_classes,
fc_dim=fc_dim,
use_softmax=use_softmax,
fpn_dim=256)
elif arch == 'upernet':
net_decoder = UPerNet(
nr_classes=nr_classes,
fc_dim=fc_dim,
use_softmax=use_softmax,
fpn_dim=512)
else:
raise Exception('Architecture undefined!')
net_decoder.apply(self.weights_init)
if len(weights) > 0:
# print('Loading weights for net_decoder')
net_decoder.load_state_dict(
torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
return net_decoder
class Resnet(nn.Module):
def __init__(self, orig_resnet):
super(Resnet, self).__init__()
# take pretrained resnet, except AvgPool and FC
self.conv1 = orig_resnet.conv1
self.bn1 = orig_resnet.bn1
self.relu1 = orig_resnet.relu1
self.conv2 = orig_resnet.conv2
self.bn2 = orig_resnet.bn2
self.relu2 = orig_resnet.relu2
self.conv3 = orig_resnet.conv3
self.bn3 = orig_resnet.bn3
self.relu3 = orig_resnet.relu3
self.maxpool = orig_resnet.maxpool
self.layer1 = orig_resnet.layer1
self.layer2 = orig_resnet.layer2
self.layer3 = orig_resnet.layer3
self.layer4 = orig_resnet.layer4
def forward(self, x, return_feature_maps=False):
conv_out = []
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.maxpool(x)
x = self.layer1(x); conv_out.append(x);
x = self.layer2(x); conv_out.append(x);
x = self.layer3(x); conv_out.append(x);
x = self.layer4(x); conv_out.append(x);
if return_feature_maps:
return conv_out
return [x]
# upernet
class UPerNet(nn.Module):
def __init__(self, nr_classes, fc_dim=4096,
use_softmax=False, pool_scales=(1, 2, 3, 6),
fpn_inplanes=(256,512,1024,2048), fpn_dim=256):
# Lazy import so that compilation isn't needed if not being used.
from .prroi_pool import PrRoIPool2D
super(UPerNet, self).__init__()
self.use_softmax = use_softmax
# PPM Module
self.ppm_pooling = []
self.ppm_conv = []
for scale in pool_scales:
# we use the feature map size instead of input image size, so down_scale = 1.0
self.ppm_pooling.append(PrRoIPool2D(scale, scale, 1.))
self.ppm_conv.append(nn.Sequential(
nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
SynchronizedBatchNorm2d(512),
nn.ReLU(inplace=True)
))
self.ppm_pooling = nn.ModuleList(self.ppm_pooling)
self.ppm_conv = nn.ModuleList(self.ppm_conv)
self.ppm_last_conv = conv3x3_bn_relu(fc_dim + len(pool_scales)*512, fpn_dim, 1)
# FPN Module
self.fpn_in = []
for fpn_inplane in fpn_inplanes[:-1]: # skip the top layer
self.fpn_in.append(nn.Sequential(
nn.Conv2d(fpn_inplane, fpn_dim, kernel_size=1, bias=False),
SynchronizedBatchNorm2d(fpn_dim),
nn.ReLU(inplace=True)
))
self.fpn_in = nn.ModuleList(self.fpn_in)
self.fpn_out = []
for i in range(len(fpn_inplanes) - 1): # skip the top layer
self.fpn_out.append(nn.Sequential(
conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
))
self.fpn_out = nn.ModuleList(self.fpn_out)
self.conv_fusion = conv3x3_bn_relu(len(fpn_inplanes) * fpn_dim, fpn_dim, 1)
# background included. if ignore in loss, output channel 0 will not be trained.
self.nr_scene_class, self.nr_object_class, self.nr_part_class, self.nr_material_class = \
nr_classes['scene'], nr_classes['object'], nr_classes['part'], nr_classes['material']
# input: PPM out, input_dim: fpn_dim
self.scene_head = nn.Sequential(
conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(fpn_dim, self.nr_scene_class, kernel_size=1, bias=True)
)
# input: Fusion out, input_dim: fpn_dim
self.object_head = nn.Sequential(
conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
nn.Conv2d(fpn_dim, self.nr_object_class, kernel_size=1, bias=True)
)
# input: Fusion out, input_dim: fpn_dim
self.part_head = nn.Sequential(
conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
nn.Conv2d(fpn_dim, self.nr_part_class, kernel_size=1, bias=True)
)
# input: FPN_2 (P2), input_dim: fpn_dim
self.material_head = nn.Sequential(
conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
nn.Conv2d(fpn_dim, self.nr_material_class, kernel_size=1, bias=True)
)
def forward(self, conv_out, output_switch=None, seg_size=None):
output_dict = {k: None for k in output_switch.keys()}
conv5 = conv_out[-1]
input_size = conv5.size()
ppm_out = [conv5]
roi = [] # fake rois, just used for pooling
for i in range(input_size[0]): # batch size
roi.append(torch.Tensor([i, 0, 0, input_size[3], input_size[2]]).view(1, -1)) # b, x0, y0, x1, y1
roi = torch.cat(roi, dim=0).type_as(conv5)
ppm_out = [conv5]
for pool_scale, pool_conv in zip(self.ppm_pooling, self.ppm_conv):
ppm_out.append(pool_conv(F.interpolate(
pool_scale(conv5, roi.detach()),
(input_size[2], input_size[3]),
mode='bilinear', align_corners=False)))
ppm_out = torch.cat(ppm_out, 1)
f = self.ppm_last_conv(ppm_out)
if output_switch['scene']: # scene
output_dict['scene'] = self.scene_head(f)
if output_switch['object'] or output_switch['part'] or output_switch['material']:
fpn_feature_list = [f]
for i in reversed(range(len(conv_out) - 1)):
conv_x = conv_out[i]
conv_x = self.fpn_in[i](conv_x) # lateral branch
f = F.interpolate(
f, size=conv_x.size()[2:], mode='bilinear', align_corners=False) # top-down branch
f = conv_x + f
fpn_feature_list.append(self.fpn_out[i](f))
fpn_feature_list.reverse() # [P2 - P5]
# material
if output_switch['material']:
output_dict['material'] = self.material_head(fpn_feature_list[0])
if output_switch['object'] or output_switch['part']:
output_size = fpn_feature_list[0].size()[2:]
fusion_list = [fpn_feature_list[0]]
for i in range(1, len(fpn_feature_list)):
fusion_list.append(F.interpolate(
fpn_feature_list[i],
output_size,
mode='bilinear', align_corners=False))
fusion_out = torch.cat(fusion_list, 1)
x = self.conv_fusion(fusion_out)
if output_switch['object']: # object
output_dict['object'] = self.object_head(x)
if output_switch['part']:
output_dict['part'] = self.part_head(x)
if self.use_softmax: # is True during inference
# inference scene
x = output_dict['scene']
x = x.squeeze(3).squeeze(2)
x = F.softmax(x, dim=1)
output_dict['scene'] = x
# inference object, material
for k in ['object', 'material']:
x = output_dict[k]
x = F.interpolate(x, size=seg_size, mode='bilinear', align_corners=False)
x = F.softmax(x, dim=1)
output_dict[k] = x
# inference part
x = output_dict['part']
x = F.interpolate(x, size=seg_size, mode='bilinear', align_corners=False)
part_pred_list, head = [], 0
for idx_part, object_label in enumerate(self.object_with_part):
n_part = len(self.object_part[object_label])
_x = F.interpolate(x[:, head: head + n_part], size=seg_size, mode='bilinear', align_corners=False)
_x = F.softmax(_x, dim=1)
part_pred_list.append(_x)
head += n_part
output_dict['part'] = part_pred_list
else: # Training
# object, scene, material
for k in ['object', 'scene', 'material']:
if output_dict[k] is None:
continue
x = output_dict[k]
x = F.log_softmax(x, dim=1)
if k == "scene": # for scene
x = x.squeeze(3).squeeze(2)
output_dict[k] = x
if output_dict['part'] is not None:
part_pred_list, head = [], 0
for idx_part, object_label in enumerate(self.object_with_part):
n_part = len(self.object_part[object_label])
x = output_dict['part'][:, head: head + n_part]
x = F.log_softmax(x, dim=1)
part_pred_list.append(x)
head += n_part
output_dict['part'] = part_pred_list
return output_dict
``` |
{
"source": "jmhaining/MSD-P21422-BSF",
"score": 3
} |
#### File: jmhaining/MSD-P21422-BSF/main.py
```python
from datetime import date
from os import path
import RPi.GPIO as GPIO
import relay
import sensor
import sys
import csv
import time
import board
from dbox_upload import upload
import dropbox
from dropbox.files import WriteMode
from dropbox.exceptions import ApiError, AuthError
import urllib.request
def get_token():
file = open("/home/pi/MSD-P21422-BSF/db_token.txt", mode='r')
token = file.readline()
file.close()
return token
def cur_date_time(today, now, verb):
#Get date and time
today = date.strftime(date.today(), '%Y/%m/%d')
now = time.strftime('%I:%M:%S%p', time.localtime())
if verb:
print('Current Date:', today, now)
return today, now
def write_to_csv(in_temp_f, in_temp_c, out_temp_f, out_temp_c, in_hum, out_hum, co2, today, now, heat_stat, hum_stat, fan_stat, light_stat, fpath):
#If the file does not exist, create it, add headers, and add first line of data
if not path.exists(fpath):
with open(fpath, mode='a') as data_file:
data = csv.writer(data_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
data.writerow(['DATE', 'TIME', 'OUTDOOR TEMP C', 'OUTDOOR TEMP F', 'INDOOR TEMP C', 'INDOOR TEMP F', 'OUTDOOR HUMIDITY', 'INDOOR HUMIDITY', 'CO2', 'HEAT STAT', 'HUM STAT', 'FAN STAT', 'LIGHT STAT'])
data.writerow([today, now, out_temp_c, out_temp_f, in_temp_c, in_temp_f, out_hum, in_hum, co2, heat_stat, hum_stat, fan_stat, light_stat])
            data_file.close()
#Otherwise, just append new line of data
else:
with open(fpath, mode='a') as data_file:
data = csv.writer(data_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
data.writerow([today, now, out_temp_c, out_temp_f, in_temp_c, in_temp_f, out_hum, in_hum, co2, heat_stat, hum_stat, fan_stat, light_stat])
data_file.close()
return
def db_access():
# Initialize Dropbox link; check for access token
token = get_token()
if (len(token) == 0):
# edited 3-9 JN
# sys.exit("ERROR: Looks like the Dropbox access token is missing or expired."
        print("ERROR: Looks like the Dropbox access token is missing or expired. "
              "Go to https://www.dropbox.com/developers/apps. Login and click on the "
              "MSD-21422 app. Go to settings, generate a new access code and copy it "
              "in line 30")
# Create an instance of a Dropbox class, which can make requests to the API.
print("Creating a Dropbox object...")
# edited 3-9 JN
try:
dbx = dropbox.Dropbox(token)
except:
pass
# Check that Dropbox access token is valid
try:
dbx.users_get_current_account()
db_connect = True
except AuthError as err:
#sys.exit("ERROR: Invalid access token; try re-generating an"
#"access token from the app console on the web.")
# edited 3-9 JN
print("ERROR: Invalid access token; try re-generating an"
"access token from the app console on the web.")
db_connect = False
return dbx, db_connect
def check_connection(host='http://google.com'):
    #Check if there is an internet connection by testing connection to google
try:
urllib.request.urlopen(host)
return True
except:
return False
def main(argv):
#If True, data will print to shell as well as write to file
#If False, data will only write to file
verb = ''
if len(sys.argv) == 2:
if sys.argv[1] == '-v' or sys.argv[1] == '-verb' or sys.argv[1] == '-verbose':
verb = True
else:
print("Invalid argument. Valid argument(s): -v[erbose]")
else:
verb = False
#Initialize file path for readings
fpath = "/home/pi/MSD-P21422-BSF/Readings/"
#Initialize data variables
in_temp_f, in_temp_c, out_temp_f, out_temp_c, in_hum, out_hum, co2 = 0, 0, 0, 0, 0, 0, 0
today, now = '0', '0'
dbx = 0
db_connect = False
#Check if connected to internet
if check_connection():
#If connected to internet, establish dropbox connection
dbx, db_connect = db_access()
while True:
today, now = cur_date_time(today, now, verb)
file_name = date.strftime(date.today(), '%Y%m%d.csv')
full_path = fpath + file_name
in_temp_f, in_temp_c, out_temp_f, out_temp_c, in_hum, out_hum, co2 = \
sensor.sensor(in_temp_f, in_temp_c, out_temp_f, out_temp_c, in_hum, out_hum, co2, verb)
heat_stat, hum_stat, fan_stat, light_stat = relay.relay(in_temp_f, in_hum, co2, verb)
write_to_csv(in_temp_f, in_temp_c, out_temp_f, out_temp_c, in_hum, out_hum, co2, today, now, heat_stat, hum_stat, fan_stat, light_stat, full_path)
# Check the internet connection
if check_connection():
#If connected to internet but not dropbox
if db_connect == False:
#Establish dropbox connection
dbx, db_connect = db_access()
#If connected to internet and to dropbox
if db_connect == True:
#Upload file to dropbox
print("Uploading the file...")
upload('/' + file_name, full_path, dbx)
print("Upload successful")
#TO-DO:
#If connection is down for more than a day, add a case to upload files that were missed
#sleep in seconds. 60 = 1 minute, 300 = 5 minutes, 1800 = 30 minutes
time.sleep(599.0)
return
if __name__ == '__main__':
main(sys.argv[0:])
#main()
``` |
{
"source": "jmhal/backendservice",
"score": 3
} |
#### File: backendservice/beta/Computation.py
```python
import random
import time
import sys
import zmq
from mpi4py import MPI
from multiprocessing import Process, Value
def sensor_reading(port, sensor):
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:" + str(port.value))
while True:
message = socket.recv()
socket.send(time.ctime() + " Value:" + str(sensor.value))
if __name__ == "__main__":
# Sensor data structure
sensor = Value('i', 0)
port = Value('i', int(sys.argv[1]))
# MPI initialization
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
# Monitoring process creation
p = None
if rank == 0:
p = Process(target=sensor_reading, args=(port,sensor))
p.start()
# Perform computation
for i in range(10):
value = random.randint(0,100)
data = comm.gather(value, root = 0)
if rank == 0:
for i in range(size):
sensor.value += data[i]
print sensor.value
time.sleep(5)
# Monitoring process termination
if rank == 0:
p.terminate()
```
#### File: backendservice/beta/__init__.py
```python
import time
import logging
import random
from multiprocessing import Process, Value, Array, Pipe
from elastichpc.base.computation.malleable import ReconfigurationPort
from elastichpc.base.computation.malleable import ExecutionControlPort
from elastichpc.base.computation.malleable import MalleableComputationComponent
from elastichpc.base.computation.malleable import ComputationProgress
from elastichpc.base.platform.malleable import AllocationPort
from elastichpc.base.platform.malleable import QoSConfigurationPort
from elastichpc.base.platform.malleable import MalleablePlatformComponent
# Computation
logger = logging.getLogger('root')
# This is the process for the computation
def compute(compute_conn):
while True:
message = compute_conn.recv()
        if message[0] == "start":
            pass  # start-up handling elided in the source
        elif message[0] == "progress":
            pass  # progress reporting elided in the source
        elif message[0] == "resources":
            pass  # resource update handling elided in the source
elif message[0] == "persist":
continue
elif message[0] == "stop":
break
compute_conn.close()
class MyReconfigurationPort(ReconfigurationPort):
def updateResources(self, resources):
logger.debug("Updating Resources for: " + str(resources))
self.component.driver_conn.send(["resources", resources])
message = self.component.driver_conn.recv()
if message[0] == "resource_reply":
return "SUCCESS"
        else:
return "FAIL: " + message[1]
def getComputationProgress(self):
self.component.driver_conn.send(["progress"])
message = self.component.driver_conn.recv()
if message[0] == "progress_reply":
return message[1]
else:
return -1.0
class MyExecutionControlPort(ExecutionControlPort):
def start(self, state = None):
logger.debug("Starting Computation.")
allocationPort = self.component.services.getPort("AllocationPort")
resources = allocationPort.getResources()
computation_process = Process(target = compute, args=(self.component.compute_conn,))
computation_process.daemon = True
computation_process.start();
self.component.driver_conn.send(["start", resources])
return
def isFinished(self):
if (self.component.reconfigurationPort.getComputationProgress() >= 1.0):
return True
return False
class MyMalleableComputation(MalleableComputationComponent):
def __init__(self):
super(MyMalleableComputation, self).__init__()
# There is a Pipe for the driver to communicate with the root unit
driver_conn, compute_conn = Pipe()
self.driver_conn = driver_conn
self.compute_conn = compute_conn
self.reconfigurationPort = MyReconfigurationPort("elastichpc.base.computation.malleable.ReconfigurationPort", self)
self.executionControlPort = MyExecutionControlPort("elastichpc.base.computation.malleable.ExecutionControlPort", self)
return
# Platform
# This is the process for the reconfiguration loop
def mape_k_loop(reconfiguration_port, qos_contract):
# Initialize the execution record
progress = 0.0
progressLog = {}
firstTimeStamp = time.time()
firstSample = progress
progressLog[firstTimeStamp] = progress
lastTimeStamp = firstTimeStamp
lastSample = firstSample
while progress < 1.0 :
logger.debug("Monitoring Computation Progress.")
# Monitor
progress = reconfiguration_port.getComputationProgress()
currentTimeStamp = time.time()
currentSample = progress
progressLog[currentTimeStamp] = currentSample
logger.debug("Progress: " + ("{:.2f}".format(progress)))
# Analyze
if currentSample > lastSample :
averageStepInterval = (currentTimeStamp - firstTimeStamp) / (currentSample * 10)
logger.debug("Average Step Interval:" + ("{:.2f}".format(averageStepInterval)))
# Plan
predicted = 10 * (1.0 - currentSample) * averageStepInterval
logger.debug("Predicted Remaining Time: " + ("{:.2f}".format(predicted)))
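            # Note (added, a reading of the code above): the "* 10" factors assume the computation
            # advances in ten equal progress steps (0.1 each); averageStepInterval and the predicted
            # remaining time are both derived from that assumption.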
reconfigurationAction = (False, 0)
if qos_contract != None :
executionTime = qos_contract["ExecutionTime"]
executionCost = qos_contract["ExecutionCost"]
elapsedTime = currentTimeStamp - firstTimeStamp
# The case for increasing the resources
if (elapsedTime + predicted) > executionTime :
targetAverageStepTime = (executionTime - elapsedTime) / ((1 - lastSample) * 10)
reconfigurationAction = (True, targetAverageStepTime)
logger.debug("Computation Must Be Reconfigured. New Resources: " + "{:.2f}".format(targetAverageStepTime))
# The case for decreasing the resources
            elif (elapsedTime + predicted) < executionTime:
                pass  # the scale-down branch is not implemented in the source
# Execute
if reconfigurationAction[0] == True:
newResources = reconfigurationAction[1]
reconfiguration_port.updateResources(newResources)
# Update Samples
lastTimeStamp = currentTimeStamp
lastSample = currentSample
else :
logger.debug("Progress Unchanged.")
time.sleep(5)
elapsedTime = time.time() - firstTimeStamp
logger.debug("Elapsed Time: " + "{:.2f}".format(elapsedTime))
# logger.debug(progressLog)
return
class MyAllocationPort(AllocationPort):
def getResources(self):
interval = random.randint(10, 20)
logger.debug("Setting Resources: " + str(interval))
reconfigurationPort = self.component.services.getPort("ComputationReconfigurationPort")
qosContract = self.component.qosContract
mape_processo = Process(target = mape_k_loop, args=(reconfigurationPort, qosContract))
mape_processo.start()
logger.debug("Monitoring Started.")
return interval
class MyQoSConfigurationPort(QoSConfigurationPort):
def setQoSContract(self, qos = None):
if qos == None:
logger.debug("No Contract Defined.")
else:
logger.debug("Contract Defined (ExecutionTime, ExecutionCost): " + str(qos))
self.component.qosContract = {}
self.component.qosContract["ExecutionTime"] = qos[0]
self.component.qosContract["ExecutionCost"] = qos[1]
class MyMalleablePlatform(MalleablePlatformComponent):
def __init__(self):
super(MyMalleablePlatform, self).__init__()
self.qosContract = None
self.allocationPort = MyAllocationPort("elastichpc.base.platform.malleable.AllocationPort", self)
self.qosConfigurationPort = MyQoSConfigurationPort("elastichpc.base.platform.malleable.QoSConfigurationPort", self)
return
```
#### File: backendservice/beta/Matriz_Work_Queue.py
```python
import os
import sys
import zmq
import numpy as np
from mpi4py import MPI
from multiprocessing import Process, Value
# Procedure executed by the control process
def control_dispatch(port, persist, progress, last_line):
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:" + str(port))
while True:
message = socket.recv()
print "Control Process Message: " + message
if message == "persist":
persist.value = True
socket.send(str(last_line.value))
break;
else:
socket.send(str(progress.value))
# This method defines the line range for a given task, that is assigned to a worker
def get_task_range(last_line, task_size):
return (last_line + 1, last_line + task_size)
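# Worked example (added, hypothetical values): with last_line == -1 and task_size == 4,
# get_task_range(-1, 4) returns (0, 3), i.e. the first task covers rows 0..3 of C; a later
# call with last_line == 3 yields (4, 7), and so on in task_size-row chunks.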
# MPI Initialization
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
TASKFINISHEDTAG = 2
KEEPWORKINGTAG = 1
STOPWORKINGTAG = 0
# This is the dimension of the square matrices.
N = int(sys.argv[1])
# This is the task size. It's the number of lines each worker will compute from C.
# N must be divisible by task_size and size, and task_size must be greater than size.
task_size = int(sys.argv[2])
# Send B to everyone
# Suppose B is read from a file. This can be done in rank 0 and later broadcasted for everyone.
# Right now, doesn't make any sense.
B = np.ones((N,N), dtype=float)
comm.Bcast(B, root = 0)
if rank == 0:
# MPI variable for status of communication directives
status = MPI.Status()
# The progress variable holds the percentage of work done.
# The persist variable informs the rank 0 process if it should save the work and quit.
# The last_line variable holds the last line from C computed. After persisting the computation, the control
# process returns this value to the root unit of the Computation Component.
# The control process deals with the exchange of these values with the root unit of the Computation component.
progress = Value('f', 0.0, lock = True)
persist = Value('b', False, lock = True)
last_line = Value('i', -1, lock = True)
last_line.value = int(sys.argv[3])
control_process = Process(target = control_dispatch, args=(33002, persist, progress, last_line))
# print "Starting Control Process."
control_process.daemon = True
control_process.start()
# The Matrices A and C
A = np.ones((N,N), dtype=float)
C = np.zeros((N,N), dtype=float)
# Verify if the matrix file exists.
file_name = sys.argv[4]
if os.path.isfile(file_name):
print "Reading Matrix from File."
C = np.loadtxt(file_name)
print "Restarting Computation from Line: %d" % last_line.value
else:
# Otherwise, compute from the beginning.
# The value passed as a parameter is just ignored.
print "Starting Computation."
last_line.value = -1
# Dictionary with the last assignments
# For each process rank used as a key, it returns a tuple with the last
# range of columns of C assigned to this process
work_assignment = {}
# Loop over all the workers, sending one task each
# We exclude rank 0, but worker 1 receives the task 0, worker 2 the task 1, and so on...
for worker in range(1, size):
task_range = get_task_range(last_line.value, task_size)
work_assignment[worker] = task_range
last_line.value = task_range[1]
sendbuffer = A[task_range[0]:(task_range[1] + 1)]
# print "task range 0: %d task range 1: %d task size: %d" % (task_range[0], task_range[1], task_size)
comm.Send(sendbuffer.reshape(task_size * N), dest = worker, tag = KEEPWORKINGTAG)
# print "sending range %d %d to worker %d" % (task_range[0], task_range[1], worker)
# Start receiving the work done and sends new tasks
remaining_tasks = (N - last_line.value) / task_size
remaining_results = remaining_tasks + size - 1
print "Remaining_tasks: %d, Remaining_results: %d" % (remaining_tasks, remaining_results)
# This is the main loop.
while (remaining_tasks != 0) or (remaining_results != 0):
# Receive a slice of C
if remaining_results != 0:
recvbuffer = np.zeros(task_size * N, dtype = float)
comm.Recv(recvbuffer, source = MPI.ANY_SOURCE, tag = TASKFINISHEDTAG, status = status)
# Find out where to place the slice of C in the final C matrix
source = status.Get_source()
C_range = work_assignment[source]
C[C_range[0]:(C_range[1]+1)] = recvbuffer.reshape(task_size, N)
remaining_results = remaining_results - 1
# print "receiving range %d %d from worker %d" % (C_range[0], C_range[1], source)
# Progress is defined by the last line calculated.
progress.value = float(last_line.value) / N
print "Setting Progress to: %d / %d = %.2f, remaining_tasks = %d, remaining_results = %d" % (last_line.value, N, progress.value, remaining_tasks, remaining_results)
# Send new work
if remaining_tasks != 0:
task_range = get_task_range(last_line.value, task_size)
            sendbuffer = A[task_range[0]:(task_range[1] + 1)]
work_assignment[source] = task_range
last_line.value = task_range[1]
comm.Send(sendbuffer.reshape(task_size * N), dest = source, tag = KEEPWORKINGTAG)
remaining_tasks = remaining_tasks - 1
# print "sending new work with range %d %d for worker %d" % (task_range[0], task_range[1], source)
# Stop sending new tasks if you should save the work
if (persist.value == True):
# print "The Computation Will Persist. Remaining_Results %d" % remaining_results
remaining_tasks = 0
if remaining_results > size:
remaining_results = size - 1
# print "remaining tasks %d remaining results %d" % (len(remaining_tasks), remaining_results)
# Say bye bye to the workers
for worker in range(1, size):
print "Stopping Worker %d" % worker
sendbuffer = np.zeros(1)
comm.Send(sendbuffer, dest = worker, tag = STOPWORKINGTAG)
# print C
# Save matrix to file
print "Saving Matrix to File."
np.savetxt(file_name, C)
print "Matrix Saved to File."
# Finish the control process
control_process.terminate()
else:
# The worker code remains unchanged for the reconfigurable version.
# A worker process receives a slice of A, and since it already has B,
# a slice of C is computed. The process does not need to know where the
# slice of A fits in the full matrix A.
status = MPI.Status()
while (True):
# Recover a slice of A
recvbuffer = np.zeros((task_size, N), dtype = float)
comm.Recv(recvbuffer.reshape(task_size * N), source = 0, tag = MPI.ANY_TAG, status = status)
if status.Get_tag() == STOPWORKINGTAG :
print "Worker %d Stopping." % rank
break
# Do the work
A_worker = recvbuffer.reshape(task_size, N)
C_worker = np.zeros((task_size, N), dtype = float)
np.matmul(A_worker, B, C_worker)
# Send back a slice of C
sendbuffer = C_worker.reshape(task_size * N)
comm.Send(sendbuffer, dest = 0, tag = TASKFINISHEDTAG)
```
#### File: trials/evolving/Computation.py
```python
import logging
import os
import time
import subprocess
def number_of_nodes():
_file = open(os.environ['HOME'] + "/machinefile", "r")
n = len ([ l for l in _file.readlines() if l.strip(' \n') != '' ])
_file.close()
return n
def log(msg):
logging.debug("COMPUTATION: " + msg)
return
def computation_unit(reconfiguration_port, computation_input):
# wait until platform is ready.
message = reconfiguration_port.computation_conn.recv()
while (message[0] != "start"):
time.sleep(5)
message = reconfiguration_port.computation_conn.recv()
log("Starting Computation.")
# compute the matrix
inputs = [ int(x) for x in computation_input.split(':') ]
inputs_size = len(inputs)
home = os.environ['HOME']
prev_m = inputs[0]
for i in range(len(inputs)) :
m = inputs[i]
log("Matrix Size = m : " + str(m) + "; prev_m : " + str(prev_m))
if m > prev_m :
log("Scale Up.")
reconfiguration_port.add_node()
elif m < prev_m :
log("Scale Down.")
reconfiguration_port.remove_node()
else:
log("Input Stable.")
reconfiguration_port.get_sensors()
prev_m = m
log("Start (MatrixSize, Iteration) = |" + str(m) + "|" + str(i) +"|")
with reconfiguration_port.machine_file_lock:
nodes = 2 * number_of_nodes()
command = ["mpirun",
"-n", str(nodes), "-machinefile", home + "/machinefile",
home + "/repositorios/elastichpc/beta/trials/Matrix.py",
str(m), home + "/teste.mtr_" + str(i)]
log(str(command))
process = subprocess.Popen(command, stdout = subprocess.PIPE, stderr=subprocess.STDOUT)
(output, error) = process.communicate()
log("End (MatrixSize, Iteration) = |" + str(m) + "|" + str(i) +"|")
os.remove(home + "/teste.mtr_" + str(i))
log("Execution = " + str(output) + "|" + str(error))
log("Progress = " + str(float(i + 1) /inputs_size))
# finish the computation
reconfiguration_port.computation_conn.send(["finished"])
log("Finish Computation.")
```
#### File: infrastructure/cloud/keystone.py
```python
import json
import requests
import time
import logging
from datetime import datetime
class Keystone:
def __init__(self, auth_url, tenant_name, username, password):
self.auth_url = auth_url
self.url = auth_url + "/tokens"
self.tenant_name = tenant_name
self.username = username
self.password = password
self.token_create_time = datetime.now()
self.token = self.get_auth_token(self.tenant_name, self.username, self.password)
self.tenant_id = self.get_tenant_id(self.tenant_name)
# configure logging
logging.basicConfig(level=logging.DEBUG)
self.logger = logging.getLogger(__name__)
return
def authenticate(self):
"""
Create a token for the heat API calls.
"""
now = datetime.now()
difference = now - self.token_create_time
if difference.seconds > 60 * 60:
self.token = self.get_auth_token(self.tenant_name, self.username, self.password)
return self.token
def get_auth_token(self, tenant_name, username, password):
headers = {'Content-Type':'application/json'}
fields = {
'auth':{
'tenantName': tenant_name,
'passwordCredentials':{
'username': username,
'password': password}
}
}
r = requests.post(self.url, data = json.dumps(fields), headers = headers)
token_id = r.json()['access']['token']['id']
return token_id
def get_tenant_id(self, tenant_name):
headers = {'X-Auth-Token': self.authenticate()}
r = requests.get(self.auth_url + "/tenants", headers=headers)
        body = r.json()
        for element in body['tenants']:
if element['name'] == tenant_name:
return element['id']
return None
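    # Minimal usage sketch (added; the endpoint and credentials are placeholders, not from the source):
    #   ks = Keystone("http://controller:5000/v2.0", "demo", "admin", "secret")
    #   headers = {'X-Auth-Token': ks.authenticate()}  # authenticate() re-issues the token after one hour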
``` |
{
"source": "jmhal/CCAPython",
"score": 3
} |
#### File: MatrixMultiplication/Matrix/__init__.py
```python
from __future__ import print_function
import gov.cca
class FileSystemManagementPort(gov.cca.Port):
"""
This port should be used by the Driver to load the matrices.
"""
def __init__(self, portType, _data):
self.data = _data
super(FileSystemManagementPort, self).__init__(portType)
return
def loadMatrixFromFile(self, filename):
_file = open(filename, "r+")
for line in _file.readlines():
self.data.append([float(n) for n in line.rstrip().split(" ")])
_file.close()
return
def saveMatrixToFile(self, filename):
_file = open(filename, "w+")
for line in self.data:
_file.writelines([str(n) + " " for n in line])
_file.write('\n')
_file.close()
def createZeroMatrix(self, order):
for i in range(order):
self.data.append([0] * order)
return
class DataAccessPort(gov.cca.Port):
"""
This port should be used by the multiplier component
"""
def __init__(self, portType, _data):
self.data = _data
super(DataAccessPort, self).__init__(portType)
return
def getOrder(self):
return len(self.data)
def getItem(self, i, j):
return self.data[i][j]
def setItem(self, i, j, value):
self.data[i][j] = value
return
def printMatrix(self):
for i in range(len(self.data)):
for j in range(len(self.data)):
print(str(self.data[i][j]), end=" ")
print('')
return
class Component(gov.cca.Component):
def __init__(self):
self.data = []
self.dataAccessPort = DataAccessPort("examples.MatrixMultiplication.Matrix.DataAccessPort", self.data)
self.fileSystemManagementPort = FileSystemManagementPort("examples.MatrixMultiplication.Matrix.FileSystemManagementPort", self.data)
return
def setServices(self, services):
self.services = services
services.addProvidesPort(self.dataAccessPort, "DataAccess", "examples.MatrixMultiplication.Matrix.DataAccessPort", None)
services.addProvidesPort(self.fileSystemManagementPort, "FileSystemManagement", "examples.MatrixMultiplication.Matrix.FileSystemManagementPort", None)
return
```
#### File: framework/manage/builders.py
```python
import uuid
import importlib
from CCAPython.gov.cca import AbstractFramework
from CCAPython.gov.cca import Port
from CCAPython.gov.cca.ports import BuilderService
from CCAPython.gov.cca.ports import EventType
from CCAPython.framework.info.connectioninfo import ConnectionID
from CCAPython.framework.info.componentinfo import ComponentID
from CCAPython.framework.common.exceptions import InstanceNotFoundException
from CCAPython.framework.manage.services import ServicesHandle
from CCAPython.framework.common.typemap import TypeMapDict
class ProviderEntry():
def __init__(self, componentID, serviceProvider):
self.componentID = componentID
self.serviceProvider = serviceProvider
return
class ComponentInstance():
def __init__(self, component, release, services):
self.component = component
self.release = release
self.services = services
# Maps a string port name to a gov.cca.ConnectionID
self.usesConnection = {}
# Maps a string port name to a set of gov.cca.ConnectionID
# Has to be initialized with every provides port from the component
# I'm considering that each component has only one connection to another component through the same uses/provides port.
self.providesConnection = {}
class FrameworkHandle(AbstractFramework, BuilderService):
def __init__(self):
# Maps a string corresponding to a component instance name a ComponentInstance object
# (instanceName) -> (ComponentInstance)
self.d_instance = {}
# (string portType) -> (ProviderEntry)
self.d_serviceProviders = {}
# (string portType) -> (gov.cca.Port)
self.d_servicePorts = {}
# Maps instance names to class names
# (instanceName) -> (className)
self.d_aliases = {}
# New Methods
def lookupPort(self, componentID, portName):
"""
Simplifies the access to GoPort on components without requiring the main method to register itself with
a gov.cca.Services object.
input: gov.cca.ComponentID componentID, string portName
output: a gov.cca.Port object
"""
instanceName = componentID.getInstanceName()
if instanceName not in self.d_instance.keys():
            raise InstanceNotFoundException(instanceName)
return self.d_instance[instanceName].services.getProvidesPort(portName)
def isProvidedService(self, portType):
"""
input: a string portType
output: a boolean
"""
if portType == "gov.cca.ports.ConnectionEventService" or portType == "gov.cca.ports.ServiceRegistry" :
return True
if portType in self.d_serviceProviders.keys() or portType in self.d_servicePorts.keys():
return True
return False
def provideRequestedServices(self, componentID, portName, portType):
"""
Provides access to two ports implemented by the framework itself: BuilderService and ConnectionEventService
input: a gov.cca.ComponentID object, a string portName, a string portType
output: void
"""
        if portType == "gov.cca.ports.ConnectionEventService" :
            userSvcs = self.d_instance[componentID.getInstanceName()].services
            uniqueName = self.getUniqueName("connectionEventer")
            svcs = self.getServices(uniqueName, portType, 0)
            svcs.addProvidesPort(userSvcs, "ConnectionEventService", portType, 0)
            self.connect(componentID, portName, svcs.getComponentID(), "ConnectionEventService")
        elif portType == "gov.cca.ports.ServiceRegistry" :
            userSvcs = self.d_instance[componentID.getInstanceName()].services
            uniqueName = self.getUniqueName("registryService")
            svcs = self.getServices(uniqueName, portType, 0)
            svcs.addProvidesPort(userSvcs, "RegistryService", portType, 0)
            self.connect(componentID, portName, svcs.getComponentID(), "RegistryService")
        elif portType in self.d_servicePorts.keys() :
            port = self.d_servicePorts[portType]
            uniqueName = self.getUniqueName("singletonPort")
            svcs = self.getServices(uniqueName, portType, 0)
            svcs.addProvidesPort(port, "AvailService", portType, 0)
            self.connect(componentID, portName, svcs.getComponentID(), "AvailService")
        elif portType in self.d_serviceProviders.keys() :
            pe = self.d_serviceProviders[portType]
            sp = pe.serviceProvider
            portName = sp.createService(portType)
            self.connect(componentID, portName, pe.componentID, portName)
return
def addServiceProvider(self, portType, componentID, provider):
"""
input: a string portType, a gov.cca.ComponenteID componentID, a gov.cca.ports.ServiceProvider provider
output: none
"""
pe = ProviderEntry(componentID, provider)
self.d_serviceProviders[portType] = pe
return
def addServicePort(self, portType, port):
"""
input: a string portType, gov.cca.Ports port
output: none
"""
self.d_servicePorts[portType] = port
return
def removeFromRegistry(self, portType):
"""
input: a string portType
output: none
"""
if portType != "gov.cca.ports.BuilderService" :
self.d_servicePorts.pop(portType, None)
self.d_serviceProviders.pop(portType, None)
return
def setInstanceRelease(self, componentID, callback):
"""
input: a gov.cca.ComponentID componentID, a gov.cca.ComponentRelease callback2
output: none
"""
instanceName = componentID.getInstanceName()
self.d_instance[instanceName].release = callback
return
def getUniqueName(self, requestedName):
"""
input: a string requestedName
output: a string that is unique in the framework scope
"""
return requestedName + "::" + str(uuid.uuid4())
def removeInstance(self, instanceName):
"""
input: a string instanceName
output: a integer
"""
if instanceName not in self.d_instance.keys():
return 0
connectionIDs = []
# Collect all connection IDs
for portName in self.d_instance[instanceName].usesConnection:
connectionIDs.append(self.d_instance[instanceName].usesConnection[portName])
if len(connectionIDs) == 0 :
print instanceName + " does not have using ports. Removing..."
# Destroy all connections
for id_ in connectionIDs:
self.disconnect(id_, 0.0)
# Remove the instance itself
# If there were no connection lefts, the instance is already gone.
if instanceName not in self.d_instance.keys():
return 0
instance = self.d_instance[instanceName]
if instance.release != None :
instance.release.releaseServices(instance.services)
self.d_instance.pop(instanceName, None)
return 1
# Methods from AbstractFramework
def createTypeMap(self):
"""
input: none
output: a TypeMap object
throws CCAException
"""
return TypeMapDict()
def createEmptyFramework(self):
"""
input: none
output: a AbstractFramework object
throws CCAException
"""
return FrameworkHandle()
def getServices(self, selfInstanceName, selfClassName, selfProperties):
"""
input: a string selfInstanceName, string selfClassName, TypeMap selfProperties
output: a Services object
throws CCAException
"""
nil = None
cid = ComponentID(None)
uniqueName = self.getUniqueName(selfInstanceName)
cid.initialize(uniqueName)
svcs = ServicesHandle()
svcs.initialize(self, cid, selfProperties, True)
        self.d_instance[uniqueName] = ComponentInstance(None, None, svcs)
self.d_aliases[uniqueName] = selfClassName
return svcs
def releaseServices(self, services):
"""
input: a Services object
output: a AbstractFramework object
throws CCAException
"""
if services != None:
cid = services.getComponentID()
instanceName = cid.getInstanceName()
if instanceName in self.d_aliases.keys() :
                n_removed_instances = self.removeInstance(instanceName)
                n_removed_aliases = self.d_aliases.pop(instanceName, None)
                if n_removed_instances != 1 or n_removed_aliases == None :
                    print "Unexpected behavior removing instances."
                    print "n_removed_instances: " + str(n_removed_instances)
                    print "n_removed_aliases: " + str(n_removed_aliases)
else :
print "Error: releaseServices() called on services object not created by getServices()"
return
def shutdownFramework(self):
"""
input: none
output: void
throws CCAException
"""
        for instanceName in list(self.d_instance):
            self.removeInstance(instanceName)
# Methods from BuilderService
def createInstance(self, instanceName, className, properties):
"""
Our components are Python classes. The className is in the format xxx.xxx.xxx.Class, where the xxx.
stands for the module to be imported and the Class is the name of the class. The xxx. part may be
repeated. For example, in doe.cca.Library.GaussianElimination, doe.cca.Library is the module name
and GaussianElimination is the class.
We could, in theory, use the properties TypeMap to pass parameters to the component object constructor.
But I will not do it. Let the calling entity call a initialize method.
input: a string instanceName, a string className, a gov.cca.TypeMap properties
output: a gov.cca.ComponentID object
throws CCAException
"""
moduleName = className.split('.')[0:-1]
moduleName = ".".join(moduleName)
className_ = className.split('.')[-1]
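        # Example of the split (added; the class name is the Matrix component defined earlier in this repo):
        #   className "examples.MatrixMultiplication.Matrix.Component"
        #   -> moduleName "examples.MatrixMultiplication.Matrix", className_ "Component"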
class_ = getattr(importlib.import_module(moduleName), className_)
component = class_()
uniqueName = self.getUniqueName(instanceName)
cid = ComponentID(None)
cid.initialize(uniqueName)
services = ServicesHandle()
services.initialize(self, cid, properties, False)
componentInstance = ComponentInstance(component, None, services)
self.d_instance[uniqueName] = componentInstance
#self.d_instance[uniqueName].component = component
#self.d_instance[uniqueName].services = services
component.setServices(services)
return cid
def getDeserialization(self, s):
"""
input: a string s
output: a gov.cca.ComponentID object
throws CCAException
"""
print "This is not implemented yet!!!"
return
def connect(self, user, usingPortName, provider, providingPortName):
"""
input: a gov.cca.ComponentID object user, a string usingPortName, a gov.cca.ComponentID object provider, a string providingPortName
output: a gov.cca.ConnectionID object
throws CCAException
"""
connectionID = ConnectionID()
userName = user.getInstanceName()
provName = provider.getInstanceName()
if (userName in self.d_instance.keys()) and (provName in self.d_instance.keys()) :
userSvc = self.d_instance[userName].services
provSvc = self.d_instance[provName].services
provSvc.notifyConnectionEvent(providingPortName, EventType.ConnectPending)
userSvc.notifyConnectionEvent(usingPortName, EventType.ConnectPending)
port = provSvc.getProvidesPort(providingPortName)
userSvc.bindPort(usingPortName, port)
connectionID.initialize(provider, providingPortName, user, usingPortName, None)
self.d_instance[userName].usesConnection[usingPortName] = connectionID
if providingPortName not in self.d_instance[provName].providesConnection :
self.d_instance[provName].providesConnection[providingPortName] = set()
self.d_instance[provName].providesConnection[providingPortName].add(connectionID)
provSvc.notifyConnectionEvent(providingPortName, EventType.Connected)
userSvc.notifyConnectionEvent(usingPortName, EventType.Connected)
return connectionID
def disconnect(self, connID, timeout):
"""
input: a gov.cca.ConnectionID object connID, a float timeout
output: void
throws CCAException
"""
userName = connID.getUser().getInstanceName()
userPortName = connID.getUserPortName()
provName = connID.getProvider().getInstanceName()
provPortName = connID.getProviderPortName()
print "Disconnecting " + userName + "." + userPortName + "-->" + provName + "." + provPortName
userSvcs = None
provSvcs = None
n_removed_user = 0
n_removed_provider = 0
if userName in self.d_instance :
userSvcs = self.d_instance[userName].services
else:
print "Unable to find instance: " + userName + "; Already removed?"
if provName in self.d_instance:
provSvcs = self.d_instance[provName].services
else:
print "Unable to find instance: " + provName + "; Already removed?"
if provSvcs != None and userSvcs != None :
userSvcs.notifyConnectionEvent(userPortName, EventType.DisconnectPending)
provSvcs.notifyConnectionEvent(provPortName, EventType.DisconnectPending)
if userName in self.d_instance:
n_removed_user = self.d_instance.pop(userName, 0)
if provName in self.d_instance and provPortName in self.d_instance[provName].providesConnection :
n_removed_provider = self.d_instance[provName].providesConnection[provPortName].remove(connID)
if len(self.d_instance[provName].providesConnection[provPortName]) == 0:
self.d_instance[provName].providesConnection.pop(provPortName, None)
userSvcs.notifyConnectionEvent(userPortName, EventType.Disconnected)
provSvcs.notifyConnectionEvent(provPortName, EventType.Disconnected)
return
def disconnectAll(self, id1, id2, timeout):
"""
input: a gov.cca.ComponentID id1, a gov.cca.ComponentID id2, a float timeout
output: void
throws CCAException
"""
userName = id1.getInstanceName()
provName = id2.getInstanceName()
cache = []
if userName in self.d_instance :
for conn in self.d_instance[userName].usesConnection:
if self.d_instance[userName].usesConnection[conn].getProvider().getInstanceName() == provName:
cache.append(self.d_instance[userName].usesConnection[conn])
for connID in cache:
            self.disconnect(connID, timeout)
return
def destroyInstance(self, toDie, timeout):
"""
input: a gov.cca.ComponentID toDie, a float timeout
output: void
throws CCAException
"""
return self.removeInstance(toDie.getInstanceName())
def getComponentProperties(self, cid):
"""
input: a gov.cca.ComponentID cid
output: a gov.cca.TypeMap
throws CCAException
"""
instanceName = cid.getInstanceName()
if instanceName in self.d_instance:
svcs = self.d_instance[instanceName].services
if svcs != None:
return svcs.getInstanceProperties()
def setComponentProperties(self, cid, properties):
"""
input: a gov.cca.ComponentID cid, a gov.cca.TypeMap properties
output: void
throws CCAException
"""
instanceName = cid.getInstanceName()
        if instanceName in self.d_instance:
svcs = self.d_instance[instanceName].services
if svcs != None :
svcs.setInstanceProperties(properties)
def getPortProperties(self, cid, portName):
"""
input: a gov.cca.ComponentID cid, a string portName
output: a gov.cca.TypeMap
throws CCAException
"""
instanceName = cid.getInstanceName()
if instanceName in self.d_instance:
svcs = self.d_instance[instanceName].services
if svcs != None:
return svcs.getPortProperties(portName)
return None
def setPortProperties(self, cid, portName, properties):
"""
input: a gov.cca.ComponentID cid, a string portName, a gov.cca.TypeMap properties
output: void
throws CCAException
"""
instanceName = cid.getInstanceName()
if instanceName in self.d_instance:
svcs = self.d_instance[instanceName].services
if svcs != None:
svcs.setPortProperties(portName, properties)
return
def getConnectionProperties(self, connID):
"""
input: a gov.cca.ConnectionID connID
output: a gov.cca.TypeMap
throws CCAException
"""
return connID.getProperties()
def setConnectionProperties(self, connID, properties):
"""
input: a gov.cca.ConnectionID connID, a gov.cca.TypeMap properties
output: void
throws CCAException
"""
userName = connID.getUser().getInstanceName()
if userName in self.d_instance :
userPortName = connID.getUserPortName()
usesConnection = self.d_instance[userName].usesConnection
if userPortName in usesConnection:
conn = usesConnection[userPortName]
if conn != None:
conn.setProperties(properties)
def getComponentID(self, componentInstanceName):
"""
input: a string componentInstanceName
output: a gov.cca.ComponentID
throws CCAException
"""
if componentInstanceName in self.d_instance:
return self.d_instance[componentInstanceName].services.getComponentID()
else :
return None
def getComponentIDs(self):
"""
input: none
output: a list of gov.cca.ComponentID
throws CCAException
"""
ids = []
for instanceName in self.d_instance:
ids.append(self.d_instance[instanceName].services.getComponentID())
return ids
def getProvidedPortNames(self, cid):
"""
input: a gov.cca.ComponenID cid
output: a list of strings
throws CCAException
"""
instanceName = cid.getInstanceName()
if instanceName in self.d_instance:
svcs = self.d_instance[instanceName].services
if svcs != None:
return svcs.getProvidedPortNames()
else:
raise InstanceNotFoundException(instanceName)
else:
raise InstanceNotFoundException(instanceName)
return []
def getUsedPortNames(self, cid):
"""
input: a gov.cca.ComponenID cid
output: a list of strings
throws CCAException
"""
instanceName = cid.getInstanceName()
if instanceName in self.d_instance:
svcs = self.d_instance[instanceName].services
if svcs != None:
return svcs.getUsedPortNames()
else:
raise InstanceNotFoundException(instanceName)
else:
raise InstanceNotFoundException(instanceName)
return []
def getConnectionIDs(self, componentList):
"""
input: a list of gov.cca.ComponentID
output: a list of goc.cca.ConnectionID
throws CCAException
"""
cache = []
for component in componentList:
instanceName = component.getInstanceName()
for portName in self.d_instance[instanceName].usesConnection :
cache.append(self.d_instance[instanceName].usesConnection[portName])
for portName in self.d_instance[instanceName].providesConnection :
for conn in self.d_instance[instanceName].providesConnection[portName] :
cache.append(conn)
return cache
``` |
{
"source": "jmhal/computacaoemnuvem",
"score": 3
} |
#### File: aws/comprehend/recuperar_tweets.py
```python
import tweepy
import os
import csv
import re
import sys
# Remove the emojis
# https://gist.github.com/slowkow/7a7f61f495e3dbb7e3d767f97bd7304b
def remove_emoji(string):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', string)
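# Quick illustration (added, hypothetical input): remove_emoji("bom dia \U0001F600") returns "bom dia "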
# Retrieving the tweets
chave_consumidor = os.environ['API_KEY']
segredo_consumidor = os.environ['API_SECRET_KEY']
token_acesso = os.environ['ACCESS_TOKEN']
token_acesso_segredo = os.environ['ACCESS_TOKEN_SECRET']
autenticacao = tweepy.OAuthHandler(chave_consumidor, segredo_consumidor)
autenticacao.set_access_token(token_acesso, token_acesso_segredo)
api = tweepy.API(autenticacao)
tweets = {}
lists = api.lists_all("jmhal", reverse=True)
for l in lists:
tweets[(l.id_str, l.name)] = []
for k in tweets.keys():
print("List: %s" % k[1])
ts = api.list_timeline(list_id=k[0], count=10000, tweet_mode="extended")
for t in ts:
tweets[(k[0],k[1])].append(t.full_text)
# Writing to the CSV file
with open('tweets.csv', mode='w') as tweets_file:
tweets_writer = csv.writer(tweets_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for k in tweets.keys():
for t in tweets[k]:
tweets_writer.writerow([k[1], remove_emoji(t.replace('\n',''))])
``` |
{
"source": "jmhale/rds-maintenance",
"score": 2
} |
#### File: rds-maintenance/rds_maintenance/rds_maintenance.py
```python
import sys
from datetime import datetime, timedelta
from operator import itemgetter
from exclusions import EXCLUDED_INSTANCES
import boto3
import botocore
## Session/client setup operations
def get_session(access_key_id, secret_access_key):
" Establishes a session with AWS "
return boto3.session.Session(
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key
)
## EC2 operations
def get_vpc_ids(client):
" Returns a list of VPC IDs in the account "
vpc_ids = []
vpcs = client.describe_vpcs()['Vpcs']
for vpc in vpcs:
vpc_ids.append(vpc['VpcId'])
return vpc_ids
def get_isolated_sgs(client):
" Returns a dict of rds-isolate SG IDs for each VPC in account. "
vpc_ids = get_vpc_ids(client)
isolated_sgs = {}
for vpc in vpc_ids:
sec_groups = client.describe_security_groups(
Filters=[
{
"Name": "vpc-id",
"Values": [vpc]
},
{
"Name": "group-name",
"Values": ["rds-isolate"]
}
]
)['SecurityGroups']
try:
isolated_sgs[vpc] = sec_groups[0]['GroupId']
except IndexError:
print("No rds-isolate group found for VPC: {}".format(vpc))
return isolated_sgs
## Cloudwatch operations
def get_connections_statistics(client, rds_instances):
" Returns a dict of all instances and their avg DB conns over all datapoints "
rds_stats = {}
for rds_instance in rds_instances:
stats = client.get_metric_statistics(
Namespace="AWS/RDS",
MetricName="DatabaseConnections",
Statistics=['Average'],
Period=57600,
StartTime=(datetime.today() - timedelta(days=3)),
EndTime=datetime.today(),
Dimensions=[
{
'Name': 'DBInstanceIdentifier',
'Value': rds_instance['DBInstanceIdentifier']
}
]
)['Datapoints']
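        # Note (added): Period=57600 s is 16 h, so the 3-day window requested above yields
        # at most four or five averaged datapoints per instance.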
datapoints = []
for stat in stats:
datapoints.append(stat['Average'])
if len(datapoints) > 0:
dp_conns = sum(datapoints)/float(len(datapoints))
rds_stats[rds_instance['DBInstanceIdentifier']] = dp_conns
else:
print("Instance: %s has no datapoints." % rds_instance['DBInstanceIdentifier'])
return rds_stats
## RDS operations
def get_rds_instances(client, vpc_id=None):
" Gets all RDS instances, per VPC, if specified. "
rds_instances = []
resp = client.describe_db_instances()
while 'Marker' in resp:
rds_instances.extend(resp['DBInstances'])
resp = client.describe_db_instances(Marker=resp['Marker'])
rds_instances.extend(resp['DBInstances'])
if not vpc_id:
return rds_instances
else:
return [r for r in rds_instances if r['DBSubnetGroup']['VpcId'] == vpc_id]
def set_no_multiaz(client, rds_instance):
" Takes a rds instance obj and turns off MultiAZ "
try:
client.modify_db_instance(
DBInstanceIdentifier=rds_instance['DBInstanceIdentifier'],
MultiAZ=False,
ApplyImmediately=True
)
except botocore.exceptions.ClientError:
print("Error setting no-multiaz on instance %s" % rds_instance['DBInstanceIdentifier'])
def set_security_group(client, rds_instance, sg_id):
" Sets the rds_instance Security Group to sg_id "
try:
client.modify_db_instance(
DBInstanceIdentifier=rds_instance['DBInstanceIdentifier'],
VpcSecurityGroupIds=[sg_id]
)
except botocore.exceptions.ClientError:
print("Error setting SG on instance %s" % rds_instance['DBInstanceIdentifier'])
def set_instance_size(client, rds_instance, size=None):
" Sets instance to the smallest available size "
if not size:
available_sizes = client.describe_orderable_db_instance_options(
Engine=rds_instance['Engine']
)['OrderableDBInstanceOptions']
size = available_sizes[0]['DBInstanceClass']
try:
client.modify_db_instance(
DBInstanceIdentifier=rds_instance['DBInstanceIdentifier'],
DBInstanceClass=size,
ApplyImmediately=True
)
except botocore.exceptions.ClientError:
print("Error setting size on instance %s" % rds_instance['DBInstanceIdentifier'])
def get_instances_with_sg(client, sg_id, vpc_id=None):
""" Gets all RDS instances that are using the sg_id """
rds_instances = get_rds_instances(client, vpc_id)
instances_with_sg = []
for instance in rds_instances:
security_groups = instance['VpcSecurityGroups']
for security_group in security_groups:
if security_group['VpcSecurityGroupId'] == sg_id:
instances_with_sg.append(instance)
return instances_with_sg
def get_snaps_for_instance(client, rds_instance, snapshot_type=''):
""" Gets all snapshots for a RDS instance"""
snapshots = []
resp = client.describe_db_snapshots(
DBInstanceIdentifier=rds_instance['DBInstanceIdentifier'],
SnapshotType=snapshot_type
)
while 'Marker' in resp:
snapshots.extend(resp['DBSnapshots'])
resp = client.describe_db_snapshots(
DBInstanceIdentifier=rds_instance['DBInstanceIdentifier'],
SnapshotType=snapshot_type,
Marker=resp['Marker']
)
snapshots.extend(resp['DBSnapshots'])
return snapshots
def get_latest_snap(client, rds_instance, debug=True):
""" Gets the latest snapshot for a RDS instance """
snapshots = get_snaps_for_instance(client, rds_instance, 'automated')
sorted_snapshots = sorted(snapshots, key=itemgetter('SnapshotCreateTime'), reverse=True)
if len(sorted_snapshots) == 0:
return None
if debug:
# for sorted_snapshot in sorted_snapshots:
# print("DEBUG: Snapshot %s, created on: %s" % (sorted_snapshot['DBSnapshotIdentifier'],
# sorted_snapshot['SnapshotCreateTime']))
print("DEBUG: The latest snap should be: %s" % sorted_snapshots[0]['DBSnapshotIdentifier'])
return sorted_snapshots[0]
def check_final_snap(client, rds_instance):
""" Check if the final snapshot has already been created """
snapshots = get_snaps_for_instance(client, rds_instance, 'manual')
for snapshot in snapshots:
if snapshot['DBSnapshotIdentifier'].startswith('%s-final-snapshot'
% rds_instance['DBInstanceIdentifier']):
return True
return False
def copy_snapshot(client, rds_instance, debug=True):
""" Copy a snapshot the latest automated snapshot """
latest_snap = get_latest_snap(client, rds_instance, debug)
try:
resp = client.copy_db_snapshot(
SourceDBSnapshotIdentifier=latest_snap['DBSnapshotIdentifier'],
TargetDBSnapshotIdentifier='%s-final-snapshot-%s'
% (rds_instance['DBInstanceIdentifier'],
datetime.today().strftime('%Y%m%d-%H%M%S')),
CopyTags=True
)
print("Copied final snapshot for %s, %s --> %s"
% (rds_instance['DBInstanceIdentifier'],
latest_snap['DBSnapshotIdentifier'],
resp['DBSnapshot']['DBSnapshotIdentifier']))
except botocore.exceptions.ClientError as exception:
print("Unable to take a snapshot of instance: %s" % rds_instance['DBInstanceIdentifier'])
print(exception)
def take_snapshot(client, rds_instance):
""" Takes a snapshot of an RDS instance """
try:
resp = client.create_db_snapshot(
DBSnapshotIdentifier='%s-final-snapshot' % rds_instance['DBInstanceIdentifier'],
DBInstanceIdentifier=rds_instance['DBInstanceIdentifier'],
)
print("Created final snapshot for %s, %s"
% (rds_instance['DBInstanceIdentifier'], resp['DBSnapshot']['DBSnapshotIdentifier']))
except botocore.exceptions.ClientError as exception:
print("Unable to take a snapshot of instance: %s" % rds_instance['DBInstanceIdentifier'])
print(exception)
## CloudFormation operations
def get_all_cfn_stacks(cfn):
""" Returns all CFN stacks """
stacks = []
resp = cfn.describe_stacks()
while 'NextToken' in resp:
stacks.extend(resp['Stacks'])
resp = cfn.describe_stacks(NextToken=resp['NextToken'])
stacks.extend(resp['Stacks'])
return stacks
def get_cfn_stack_for_rds(cfn, rds_instances, debug=True):
""" Gets all CFN stacks for the given RDS instances """
stacks = get_all_cfn_stacks(cfn)
old_stacks = []
for instance in rds_instances:
for stack in stacks:
if stack['StackName'] == instance['DBInstanceIdentifier']:
old_stacks.append(stack)
if debug:
print("Stack: %s" % stack['StackName'])
return old_stacks
def destroy_cfn_stack(cfn, stack, dry_run=True):
""" Destroys a Cloudformation stack """
if not dry_run:
try:
cfn.delete_stack(StackName=stack['StackName'])
except botocore.exceptions.ClientError as exception:
print("ERROR: Delete stack: %s failed with error: %s" % (stack['StackName'], exception))
print("Deleted stack: %s" % stack['StackName'])
else:
print("DRYRUN: Would have deleted stack: %s" % stack['StackName'])
##
def get_old_instances(ec2, rds, debug=True):
""" Gets RDS instances slated for decomm """
isolated_sgs = get_isolated_sgs(ec2)
old_instances = []
for group in isolated_sgs.values():
isolated_instances = get_instances_with_sg(rds, group)
for instance in isolated_instances:
old_instances.append(instance)
if debug:
for instance in old_instances:
print(instance['DBInstanceIdentifier'])
print("%s instances found." % len(old_instances))
return old_instances
def get_old_stacks(cfn, old_instances, debug=True):
""" Gets all of the stacks for the old RDS instances """
old_stacks = get_cfn_stack_for_rds(cfn, old_instances, debug)
if debug:
print("DEBUG: Old stacks found: %s" % len(old_stacks))
return old_stacks
def destroy_old_cfn_stacks(cfn, old_stacks, dry_run=True):
""" Destroys all old CFN stacks """
for stack in old_stacks:
destroy_cfn_stack(cfn, stack, dry_run)
def snapshot_old_rds_instances(rds, old_instances, dry_run=True, debug=True):
""" Performs a final snapshot on old RDS instances. """
for instance in old_instances:
has_final_snap = check_final_snap(rds, instance)
latest_snap = get_latest_snap(rds, instance, debug)
if not dry_run and latest_snap is not None and not has_final_snap:
copy_snapshot(rds, instance, debug)
elif not dry_run and latest_snap is not None and has_final_snap:
print("%s already has a final snapshot. Skipping." % instance['DBInstanceIdentifier'])
elif dry_run and latest_snap is not None and not has_final_snap:
print("DRYRUN: Would have copied a snapshot of %s from %s"
% (instance['DBInstanceIdentifier'], latest_snap['DBSnapshotIdentifier']))
elif dry_run and latest_snap is not None and has_final_snap:
print("DRYRUN: %s already has a final snapshot. Would have skipped."
% instance['DBInstanceIdentifier'])
else:
print("No automated snapshots found for %s." % instance['DBInstanceIdentifier'])
def prep_rds_instances_for_decomm(ec2, rds, cloudwatch, dry_run=True, debug=True):
"""
Finds RDS instances with low connection counts and
applies an isolated SG, sizes it down and sets to single AZ
"""
isolated_sgs = get_isolated_sgs(ec2)
all_rds_instances = get_rds_instances(rds)
all_rds_stats = get_connections_statistics(cloudwatch, all_rds_instances)
if debug:
print("DEBUG: Number of RDS instances found: %s" % len(all_rds_instances))
print("DEBUG: Isolated SGs {}".format(isolated_sgs))
print("DEBUG: All RDS Instances: ")
for instance in all_rds_instances:
print(instance['DBInstanceIdentifier'])
abandoned_instances = []
if len(EXCLUDED_INSTANCES) > 0:
print("\nThe following instances meet low connections criteria, but have been excluded.")
for key in all_rds_stats:
if all_rds_stats[key] == 0 and key not in EXCLUDED_INSTANCES:
abandoned_instances.append(key)
elif all_rds_stats[key] == 0 and key in EXCLUDED_INSTANCES:
print(key)
if debug:
print("DEBUG: Instance: %s. Connections: %s" % (key, all_rds_stats[key]))
if len(abandoned_instances) > 0:
print("\nThe following instances appear to be abandoned. Please investigate.")
for instance in abandoned_instances:
print(instance)
else:
print("\nNo instances appear to be abandoned.")
sys.exit(0)
print("\nTaking action on the following instances: ")
for rds_instance in all_rds_instances:
if rds_instance['DBInstanceIdentifier'] in abandoned_instances and dry_run:
print("DRYRUN: %s would have been isolated and downsized."
% rds_instance['DBInstanceIdentifier'])
elif rds_instance['DBInstanceIdentifier'] in abandoned_instances and not dry_run:
print("Isolating and downsizing instance: %s"
% rds_instance['DBInstanceIdentifier'])
set_security_group(rds,
rds_instance,
isolated_sgs[rds_instance['DBSubnetGroup']['VpcId']])
set_instance_size(rds,
rds_instance,
'db.t2.small')
set_no_multiaz(rds, rds_instance)
def main():
""" main execution """
dry_run = True
debug = True
session = get_session('', '')
ec2 = session.client('ec2')
rds = session.client('rds')
cdw = session.client('cloudwatch')
cfn = session.client('cloudformation')
# prep_rds_instances_for_decomm(ec2, rds, cdw, dry_run, debug)
old_instances = get_old_instances(ec2, rds, debug)
# snapshot_old_rds_instances(rds, old_instances, dry_run, debug)
old_stacks = get_old_stacks(cfn, old_instances, debug)
# destroy_old_cfn_stacks(cfn, old_stacks, dry_run)
main()
``` |
{
"source": "jmhal/infraservice",
"score": 2
} |
#### File: infraservice/abstraction/cluster.py
```python
from infrastructureabstraction import InfrastructureAbstraction
class Cluster(InfrastructureAbstraction):
def __init__(self, clusterImplementor, profiles):
InfrastructureAbstraction.__init__(self, clusterImplementor, profiles)
``` |
{
"source": "jmhansen/npr-songs",
"score": 3
} |
#### File: app/parsers/markup_keys.py
```python
class BaseMarkupKey:
date_kwargs = None
songs_kwargs = None
song_title_kwargs = None
song_artist_kwargs = None
def __init__(self, markup, show_title):
self.markup = markup
self.show_title = show_title
def get_show_title(self):
return self.show_title
def get_episode_date(self):
return self.markup.find(**self.date_kwargs).text
def get_songs_markup(self):
return self.markup.find_all(class_='song-meta-wrap')
def build_song_list(self):
songs_markup = self.get_songs_markup()
song_list = []
for i, song in enumerate(songs_markup, 1):
# solve for empty song_title or artist tag
if song.find(**self.song_title_kwargs):
song_title = song.find(**self.song_title_kwargs).text.strip()
else:
song_title = ''
if song.find(**self.song_artist_kwargs):
artist = song.find(**self.song_artist_kwargs).text.strip()
else:
artist = ''
if song_title and artist:
song_dict = {
'song_title': song_title,
'artist': artist,
'order': i,
'program': self.get_show_title(),
'date': self.get_episode_date()
}
song_list.append(song_dict)
return song_list
class NPRGenericShowMarkupKey(BaseMarkupKey):
date_kwargs = {'class_': 'date'}
songs_kwargs = {'class_': 'song-meta-wrap'}
song_title_kwargs = {'class_': 'song-meta-title'}
song_artist_kwargs = {'class_': 'song-meta-artist'}
def get_episode_date(self):
raw_show_date = super().get_episode_date()
show_date = raw_show_date.splitlines()[2].lstrip()
return show_date
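# Usage sketch (added; assumes the markup object comes from BeautifulSoup, which the
# find/find_all calls imply, and a hypothetical show title):
#   soup = BeautifulSoup(episode_html, 'html.parser')
#   songs = NPRGenericShowMarkupKey(soup, 'World Cafe').build_song_list()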
``` |
{
"source": "jmharvey1/SdrCwXcvr",
"score": 2
} |
#### File: SdrCwXcvr/Quisk/configure.py
```python
from __future__ import print_function
import sys, wx, wx.lib, wx.combo, os, re, pickle, traceback, json
from wx.lib.scrolledpanel import ScrolledPanel
from types import *
# Quisk will alter quisk_conf_defaults to include the user's config file.
import quisk_conf_defaults as conf
import _quisk as QS
# Settings is [
# 0: radio_requested, a string radio name or "Ask me" or "ConfigFileRadio"
# 1: radio in use and last used, a string radio name or "ConfigFileRadio"
# 2: list of radio names
# 3: parallel list of radio dicts. These are all the parameters for the corresponding radio. In
# general, they are a subset of all the parameters listed in self.sections and self.receiver_data[radio_name].
# ]
# radio_dict is a dictionary of variable names and text values for each radio including radio ConfigFileRadio.
# Only variable names from the specified radio and all sections are included.
# local_conf is the single instance of class Configuration
class Configuration:
def __init__(self, app, AskMe): # Called first
global application, local_conf, Settings, noname_enable, platform_ignore, platform_accept
Settings = ["ConfigFileRadio", "ConfigFileRadio", [], []]
application = app
local_conf = self
noname_enable = []
if sys.platform == 'win32':
platform_ignore = 'lin_'
platform_accept = 'win_'
else:
platform_accept = 'lin_'
platform_ignore = 'win_'
self.sections = []
self.receiver_data = []
self.StatePath = conf.settings_file_path
if not self.StatePath:
self.StatePath = os.path.join(conf.DefaultConfigDir, "quisk_settings.json")
self.ReadState()
if AskMe or Settings[0] == "Ask me":
choices = Settings[2] + ["ConfigFileRadio"]
dlg = wx.SingleChoiceDialog(None, "", "Start Quisk with this Radio",
choices, style=wx.DEFAULT_FRAME_STYLE|wx.OK|wx.CANCEL)
try:
n = choices.index(Settings[1]) # Set default to last used radio
except:
pass
else:
dlg.SetSelection(n)
ok = dlg.ShowModal()
if ok != wx.ID_OK:
sys.exit(0)
select = dlg.GetStringSelection()
dlg.Destroy()
if Settings[1] != select:
Settings[1] = select
self.settings_changed = True
else:
Settings[1] = Settings[0]
if Settings[1] == "ConfigFileRadio":
Settings[2].append("ConfigFileRadio")
Settings[3].append({})
self.ParseConfig()
def UpdateConf(self): # Called second to update the configuration for the selected radio
if Settings[1] == "ConfigFileRadio":
return
radio_dict = self.GetRadioDict()
radio_type = radio_dict['hardware_file_type']
# Fill in required values
if radio_type == "SdrIQ":
radio_dict["use_sdriq"] = '1'
else:
radio_dict["use_sdriq"] = '0'
if radio_type not in ("HiQSDR", "Hermes", "Red Pitaya", "Odyssey"):
radio_dict["use_rx_udp"] = '0'
# fill in conf from our configuration data; convert text items to Python objects
errors = ''
for k, v in radio_dict.items():
if k == 'favorites_file_path': # A null string is equivalent to "not entered"
if not v.strip():
continue
try:
fmt = self.format4name[k]
except:
errors = errors + "Ignore obsolete parameter %s\n" % k
del radio_dict[k]
self.settings_changed = True
continue
k4 = k[0:4]
if k4 == platform_ignore:
continue
elif k4 == platform_accept:
k = k[4:]
fmt4 = fmt[0:4]
if fmt4 not in ('dict', 'list'):
i1 = v.find('#')
if i1 > 0:
v = v[0:i1]
try:
if fmt4 == 'text': # Note: JSON returns Unicode strings !!!
setattr(conf, k, str(v))
elif fmt4 in ('dict', 'list'):
setattr(conf, k, v)
elif fmt4 == 'inte':
setattr(conf, k, int(v, base=0))
elif fmt4 == 'numb':
setattr(conf, k, float(v))
elif fmt4 == 'bool':
if v == "True":
setattr(conf, k, True)
else:
setattr(conf, k, False)
elif fmt4 == 'rfil':
pass
else:
print ("Unknown format for", k, fmt)
except:
errors = errors + "Failed to set %s to %s using format %s\n" % (k, v, fmt)
#traceback.print_exc()
if conf.color_scheme == 'B':
conf.__dict__.update(conf.color_scheme_B)
elif conf.color_scheme == 'C':
conf.__dict__.update(conf.color_scheme_C)
if errors:
dlg = wx.MessageDialog(None, errors,
'Update Settings', wx.OK|wx.ICON_ERROR)
ret = dlg.ShowModal()
dlg.Destroy()
def NormPath(self, path): # Convert between Unix and Window file paths
if sys.platform == 'win32':
path = path.replace('/', '\\')
else:
path = path.replace('\\', '/')
return path
def GetHardware(self): # Called third to open the hardware file
if Settings[1] == "ConfigFileRadio":
return False
path = self.GetRadioDict()["hardware_file_name"]
path = self.NormPath(path)
if not os.path.isfile(path):
dlg = wx.MessageDialog(None,
"Failure for hardware file %s!" % path,
'Hardware File', wx.OK|wx.ICON_ERROR)
ret = dlg.ShowModal()
dlg.Destroy()
path = 'quisk_hardware_model.py'
dct = {}
dct.update(conf.__dict__) # make items from conf available
if dct.has_key("Hardware"):
del dct["Hardware"]
if dct.has_key('quisk_hardware'):
del dct["quisk_hardware"]
exec(compile(open(path).read(), path, 'exec'), dct)
if dct.has_key("Hardware"):
application.Hardware = dct['Hardware'](application, conf)
return True
return False
def Initialize(self): # Called fourth to fill in our ConfigFileRadio radio from conf
if Settings[1] == "ConfigFileRadio":
radio_dict = self.GetRadioDict("ConfigFileRadio")
typ = self.GuessType()
radio_dict['hardware_file_type'] = typ
all_data = []
all_data = all_data + self.GetReceiverData(typ)
for name, sdata in self.sections:
all_data = all_data + sdata
for data_name, text, fmt, help_text, values in all_data:
data_name4 = data_name[0:4]
if data_name4 == platform_ignore:
continue
elif data_name4 == platform_accept:
conf_name = data_name[4:]
else:
conf_name = data_name
try:
if fmt in ("dict", "list"):
radio_dict[data_name] = getattr(conf, conf_name)
else:
radio_dict[data_name] = str(getattr(conf, conf_name))
except:
if data_name == 'playback_rate':
pass
else:
print ('No config file value for', data_name)
def GetWidgets(self, app, hardware, conf, frame, gbs, vertBox): # Called fifth
if Settings[1] == "ConfigFileRadio":
return False
path = self.GetRadioDict()["widgets_file_name"]
path = self.NormPath(path)
if os.path.isfile(path):
dct = {}
dct.update(conf.__dict__) # make items from conf available
exec(compile(open(path).read(), path, 'exec'), dct)
if dct.has_key("BottomWidgets"):
app.bottom_widgets = dct['BottomWidgets'](app, hardware, conf, frame, gbs, vertBox)
return True
def OnPageChanging(self, event):
index = event.GetSelection()
if index >= self.radios_page_start:
page = self.notebk.GetPage(index)
page.MakePages()
def AddPages(self, notebk, width): # Called sixth to add pages Help, Radios, all radio names
global win_width
win_width = width
self.notebk = notebk
page = ConfigHelp(notebk)
notebk.AddPage(page, "Help with Radios")
self.radio_page = Radios(notebk)
notebk.AddPage(self.radio_page, "Radios")
self.radios_page_start = notebk.GetPageCount()
if sys.platform == 'win32': # On Windows, PAGE_CHANGING doesn't work
notebk.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.OnPageChanging)
else:
notebk.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGING, self.OnPageChanging)
for name in Settings[2]:
page = RadioNotebook(notebk, name)
if name == Settings[1]:
notebk.AddPage(page, "*%s*" % name)
else:
notebk.AddPage(page, name)
def GuessType(self):
udp = conf.use_rx_udp
if conf.use_sdriq:
return 'SdrIQ'
elif udp == 1:
return 'HiQSDR'
elif udp == 2:
return 'HiQSDR'
elif udp == 10:
return 'Hermes'
elif udp > 0:
return 'HiQSDR'
return 'SoftRock USB'
def AddRadio(self, radio_name, typ):
radio_dict = {}
radio_dict['hardware_file_type'] = typ
Settings[2].append(radio_name)
Settings[3].append(radio_dict)
for data_name, text, fmt, help_text, values in self.GetReceiverData(typ):
radio_dict[data_name] = values[0]
for name, data in self.sections:
for data_name, text, fmt, help_text, values in data:
radio_dict[data_name] = values[0]
page = RadioNotebook(self.notebk, radio_name)
page.MakePages()
self.notebk.AddPage(page, radio_name)
return True
def RenameRadio(self, old, new):
index = Settings[2].index(old)
n = self.radios_page_start + index
if old == Settings[1]:
self.notebk.SetPageText(n, "*%s*" % new)
else:
self.notebk.SetPageText(n, new)
Settings[2][index] = new
self.notebk.GetPage(n).NewName(new)
if old == "ConfigFileRadio":
for ctrl in noname_enable:
ctrl.Enable()
return True
def DeleteRadio(self, name):
index = Settings[2].index(name)
n = self.radios_page_start + index
self.notebk.DeletePage(n)
del Settings[2][index]
del Settings[3][index]
return True
def GetRadioDict(self, radio_name=None): # None radio_name means the current radio
if radio_name:
index = Settings[2].index(radio_name)
else: # index of radio in use
index = Settings[2].index(Settings[1])
return Settings[3][index]
def GetSectionData(self, section_name):
for sname, data in self.sections:
if sname == section_name:
return data
return None
def GetReceiverData(self, receiver_name):
for rxname, data in self.receiver_data:
if rxname == receiver_name:
return data
return None
def GetReceiverDatum(self, receiver_name, item_name):
for rxname, data in self.receiver_data:
if rxname == receiver_name:
for data_name, text, fmt, help_text, values in data:
if item_name == data_name:
return values[0]
break
return ''
def ReceiverHasName(self, receiver_name, item_name):
for rxname, data in self.receiver_data:
if rxname == receiver_name:
for data_name, text, fmt, help_text, values in data:
if item_name == data_name:
return True
break
return False
def ReadState(self):
self.settings_changed = False
global Settings
try:
fp = open(self.StatePath, "rb")
except:
return
try:
Settings = json.load(fp)
except:
traceback.print_exc()
fp.close()
try: # Do not save settings for radio ConfigFileRadio
index = Settings[2].index("ConfigFileRadio")
except ValueError:
pass
else:
del Settings[2][index]
del Settings[3][index]
for sdict in Settings[3]: # Python None is saved as "null"
if sdict.has_key("tx_level"):
if sdict["tx_level"].has_key("null"):
v = sdict["tx_level"]["null"]
sdict["tx_level"][None] = v
del sdict["tx_level"]["null"]
def SaveState(self):
if not self.settings_changed:
return
try:
fp = open(self.StatePath, "wb")
except:
traceback.print_exc()
return
json.dump(Settings, fp, indent=2)
fp.close()
self.settings_changed = False
def ParseConfig(self):
# ParseConfig() fills self.sections, self.receiver_data, and
# self.format4name with the items that Configuration understands.
# Dicts and lists are Python objects. All other items are text, not Python objects.
#
# Sections start with 16 #, section name
# self.sections is a list of [section_name, section_data]
# section_data is a list of [data_name, text, fmt, help_text, values]
# Receiver sections start with 16 #, "Receivers ", receiver name, explain
# self.receiver_data is a list of [receiver_name, receiver_data]
# receiver_data is a list of [data_name, text, fmt, help_text, values]
# Variable names start with ## variable_name variable_text, format
# The format is integer, number, text, boolean, integer choice, text choice, rfile
# Then some help text starting with "# "
# Then a list of possible value#explain with the default first
# Then a blank line to end.
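# A hypothetical quisk_conf_defaults.py item of the shape this parser expects
# (names and values below are illustrative only):
#   ## sample_rate   I/Q sample rate, integer choice
#   # The sample rate of the I/Q capture device in Hertz.
#   #sample_rate = 48000    # 48 ksps
#   #sample_rate = 96000    # 96 ksps
#   <blank line ends the item>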
self.format4name = {}
self.format4name['hardware_file_type'] = 'text'
re_AeqB = re.compile(r"^#?(\w+)\s*=\s*([^#]+)#*(.*)") # item values "a = b"
section = None
data_name = None
fp = open("quisk_conf_defaults.py", "rb")
for line in fp:
line = line.strip()
if not line:
data_name = None
continue
if line[0:27] == '################ Receivers ':
section = 'Receivers'
args = line[27:].split(',', 1)
rxname = args[0].strip()
section_data = []
self.receiver_data.append((rxname, section_data))
elif line[0:17] == '################ ':
args = line[17:].split(None, 2)
section = args[0]
if section in ('Keys', 'Colors', 'Obsolete'):
section = None
continue
rxname = None
section_data = []
self.sections.append((section, section_data))
if not section:
continue
if line[0:3] == '## ': # item_name item_text, format
args = line[3:].split(None, 1)
data_name = args[0]
args = args[1].split(',', 1)
dspl = args[0].strip()
fmt = args[1].strip()
value_list = []
if self.format4name.has_key(data_name):
if self.format4name[data_name] != fmt:
print ("Inconsistent format for", data_name, self.format4name[data_name], fmt)
else:
self.format4name[data_name] = fmt
section_data.append([data_name, dspl, fmt, '', value_list])
if not data_name:
continue
mo = re_AeqB.match(line)
if mo:
if data_name != mo.group(1):
print ("Parse error for", data_name)
continue
value = mo.group(2).strip()
expln = mo.group(3).strip()
if value[0] in ('"', "'"):
value = value[1:-1]
elif value == '{': # item is a dictionary
value = getattr(conf, data_name)
elif value == '[': # item is a list
value = getattr(conf, data_name)
if expln:
value_list.append("%s # %s" % (value, expln))
else:
value_list.append(value)
elif line[0:2] == '# ':
section_data[-1][3] = section_data[-1][3] + line[2:] + ' '
fp.close()
class ConfigHelp(wx.html.HtmlWindow): # The "Help with Radios" first-level page
"""Create the help screen for the configuration tabs."""
def __init__(self, parent):
wx.html.HtmlWindow.__init__(self, parent, -1, size=(win_width, 100))
if "gtk2" in wx.PlatformInfo:
self.SetStandardFonts()
self.SetFonts("", "", [10, 12, 14, 16, 18, 20, 22])
# read in text from file help_conf.html in the directory of this module
self.LoadFile('help_conf.html')
class RadioNotebook(wx.Notebook): # The second-level notebook for each radio name
def __init__(self, parent, radio_name):
wx.Notebook.__init__(self, parent)
font = wx.Font(conf.config_font_size, wx.FONTFAMILY_SWISS, wx.NORMAL,
wx.FONTWEIGHT_NORMAL, face=conf.quisk_typeface)
self.SetFont(font)
self.radio_name = radio_name
self.pages = []
def MakePages(self):
if self.pages:
return
radio_name = self.radio_name
page = RadioHardware(self, radio_name)
self.AddPage(page, "Hardware")
self.pages.append(page)
page = RadioSound(self, radio_name)
self.AddPage(page, "Sound")
self.pages.append(page)
for section, names in local_conf.sections:
if section in ('Sound', 'Bands'): # There is a special page for these sections
continue
page = RadioSection(self, radio_name, section, names)
self.AddPage(page, section)
self.pages.append(page)
page = RadioBands(self, radio_name)
self.AddPage(page, "Bands")
self.pages.append(page)
def NewName(self, new_name):
self.radio_name = new_name
for page in self.pages:
page.radio_name = new_name
class ComboCtrl(wx.combo.ComboCtrl):
def __init__(self, parent, value, choices, no_edit=False):
self.value = value
self.choices = choices[:]
self.handler = None
self.height = parent.quisk_height
if no_edit:
wx.combo.ComboCtrl.__init__(self, parent, -1, style=wx.CB_READONLY)
else:
wx.combo.ComboCtrl.__init__(self, parent, -1, style=wx.TE_PROCESS_ENTER)
self.GetTextCtrl().Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
self.Bind(wx.EVT_TEXT_ENTER, self.OnTextEnter)
self.ctrl = ListBoxComboPopup(choices, parent.font)
self.SetPopupControl(self.ctrl)
self.SetText(value)
self.SetSizes()
def SetItems(self, lst):
self.ctrl.SetItems(lst)
self.choices = lst[:]
self.SetSizes()
def SetSizes(self):
charx = self.GetCharWidth()
wm = charx
w, h = self.GetTextExtent(self.value)
if wm < w:
wm = w
for ch in self.choices:
w, h = self.GetTextExtent(ch)
if wm < w:
wm = w
wm += charx * 5
self.SetSizeHints(wm, self.height, 9999, self.height)
def SetSelection(self, n):
try:
text = self.choices[n]
except IndexError:
self.SetText('')
self.value = ''
else:
self.ctrl.SetSelection(n)
self.SetText(text)
self.value = text
def OnTextEnter(self, event=None):
if event:
event.Skip()
if self.value != self.GetValue():
self.value = self.GetValue()
if self.handler:
ok = self.handler(self)
def OnKillFocus(self, event):
event.Skip()
self.OnTextEnter(event)
def OnListbox(self):
self.OnTextEnter()
class ListBoxComboPopup(wx.ListBox, wx.combo.ComboPopup):
def __init__(self, choices, font):
wx.combo.ComboPopup.__init__(self)
self.choices = choices
self.font = font
self.lbox = None
def Create(self, parent):
self.lbox = wx.ListBox(parent, choices=self.choices, style=wx.LB_SINGLE)
self.lbox.SetFont(self.font)
self.lbox.Bind(wx.EVT_MOTION, self.OnMotion)
self.lbox.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
return True
def SetItems(self, lst):
self.choices = lst[:]
self.lbox.Set(self.choices)
def SetSelection(self, n):
self.lbox.SetSelection(n)
def GetStringValue(self):
try:
return self.choices[self.lbox.GetSelection()]
except IndexError:
pass
return ''
def GetAdjustedSize(self, minWidth, prefHeight, maxHeight):
chary = self.lbox.GetCharHeight()
return (minWidth, chary * len(self.choices) * 15 / 10 + chary)
def OnLeftDown(self, event):
event.Skip()
self.Dismiss()
self.GetCombo().OnListbox()
def OnMotion(self, event):
event.Skip()
item = self.lbox.HitTest(event.GetPosition())
if item >= 0:
self.lbox.SetSelection(item)
def GetControl(self):
return self.lbox
class BaseWindow(ScrolledPanel):
def __init__(self, parent):
ScrolledPanel.__init__(self, parent)
self.font = wx.Font(conf.config_font_size, wx.FONTFAMILY_SWISS, wx.NORMAL,
wx.FONTWEIGHT_NORMAL, face=conf.quisk_typeface)
self.SetFont(self.font)
self.row = 1
self.charx = self.GetCharWidth()
self.chary = self.GetCharHeight()
self.quisk_height = self.chary * 14 / 10
# GBS
self.gbs = wx.GridBagSizer(2, 2)
self.gbs.SetEmptyCellSize((self.charx, self.charx))
self.SetSizer(self.gbs)
self.gbs.Add((self.charx, self.charx), (0, 0))
def MarkCols(self):
for col in range(1, self.num_cols):
c = wx.StaticText(self, -1, str(col % 10))
self.gbs.Add(c, (self.row, col))
self.row += 1
def NextRow(self, row=None):
if row is None:
self.row += 1
else:
self.row = row
def AddTextL(self, col, text, span=None):
c = wx.StaticText(self, -1, text)
if col < 0:
pass
elif span is None:
self.gbs.Add(c, (self.row, col), flag=wx.ALIGN_CENTER_VERTICAL)
else:
self.gbs.Add(c, (self.row, col), span=(1, span), flag=wx.ALIGN_CENTER_VERTICAL)
return c
def AddTextCHelp(self, col, text, help_text, span=None):
bsizer = wx.BoxSizer(wx.HORIZONTAL)
txt = wx.StaticText(self, -1, text)
bsizer.Add(txt, flag=wx.ALIGN_CENTER_VERTICAL)
btn = wx.Button(self, -1, "..")
btn.quisk_help_text = help_text
btn.quisk_caption = text
h = self.quisk_height + 2
btn.SetSizeHints(h, h, h, h)
btn.Bind(wx.EVT_BUTTON, self._BTnHelp)
bsizer.Add(btn, flag=wx.ALIGN_CENTER_VERTICAL|wx.LEFT, border=self.charx)
if col < 0:
pass
elif span is None:
self.gbs.Add(bsizer, (self.row, col), flag = wx.ALIGN_CENTER)
else:
self.gbs.Add(bsizer, (self.row, col), span=(1, span), flag = wx.ALIGN_CENTER)
return bsizer
def AddBoxSizer(self, col, span):
bsizer = wx.BoxSizer(wx.HORIZONTAL)
self.gbs.Add(bsizer, (self.row, col), span=(1, span))
return bsizer
def AddColSpacer(self, col, width): # add a width spacer to row 0
self.gbs.Add((width * self.charx, 1), (0, col)) # width is in characters
def AddRadioButton(self, col, text, span=None, start=False):
if start:
c = wx.RadioButton(self, -1, text, style=wx.RB_GROUP)
else:
c = wx.RadioButton(self, -1, text)
if col < 0:
pass
elif span is None:
self.gbs.Add(c, (self.row, col), flag=wx.ALIGN_CENTER_VERTICAL)
else:
self.gbs.Add(c, (self.row, col), span=(1, span), flag=wx.ALIGN_CENTER_VERTICAL)
return c
def AddCheckBox(self, col, text, handler=None):
btn = wx.CheckBox(self, -1, text)
h = self.quisk_height + 2
btn.SetSizeHints(-1, h, -1, h)
if col >= 0:
self.gbs.Add(btn, (self.row, col))
if self.radio_name == "ConfigFileRadio":
btn.Enable(False)
noname_enable.append(btn)
if handler:
btn.Bind(wx.EVT_CHECKBOX, handler)
return btn
def AddPushButton(self, col, text, border=0):
#btn = wx.Button(self, -1, text, style=wx.BU_EXACTFIT)
btn = wx.lib.buttons.GenButton(self, -1, text)
btn.SetBezelWidth(2)
btn.SetUseFocusIndicator(False)
h = self.quisk_height + 2
btn.SetSizeHints(-1, h, -1, h)
if col >= 0:
self.gbs.Add(btn, (self.row, col), flag=wx.RIGHT|wx.LEFT, border=border*self.charx)
if self.radio_name == "ConfigFileRadio":
btn.Enable(False)
noname_enable.append(btn)
return btn
def AddPushButtonR(self, col, text, border=0):
btn = self.AddPushButton(-1, text, border=0)
if col >= 0:
self.gbs.Add(btn, (self.row, col), flag=wx.ALIGN_RIGHT|wx.RIGHT|wx.LEFT, border=border*self.charx)
return btn
def AddComboCtrl(self, col, value, choices, right=False, no_edit=False, span=None, border=1):
cb = ComboCtrl(self, value, choices, no_edit)
if col < 0:
pass
elif span is None:
self.gbs.Add(cb, (self.row, col), flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL|wx.EXPAND|wx.RIGHT|wx.LEFT, border=border*self.charx)
else:
self.gbs.Add(cb, (self.row, col), span=(1, span), flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL|wx.EXPAND|wx.RIGHT|wx.LEFT, border=border*self.charx)
if self.radio_name == "ConfigFileRadio":
cb.Enable(False)
noname_enable.append(cb)
return cb
def AddComboCtrlTx(self, col, text, value, choices, right=False, no_edit=False):
c = wx.StaticText(self, -1, text)
if col >= 0:
self.gbs.Add(c, (self.row, col))
cb = self.AddComboCtrl(col + 1, value, choices, right, no_edit)
else:
cb = self.AddComboCtrl(col, value, choices, right, no_edit)
return c, cb
def AddTextComboHelp(self, col, text, value, choices, help_text, no_edit=False, border=2, span_text=1, span_combo=1):
txt = wx.StaticText(self, -1, text)
self.gbs.Add(txt, (self.row, col), span=(1, span_text), flag=wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, border=self.charx)
col += span_text
cb = self.AddComboCtrl(-1, value, choices, False, no_edit)
if no_edit:
l = len(value)
for i in range(len(choices)):
if value == choices[i][0:l]:
cb.SetSelection(i)
break
else:
print ("Failure to set value for", text, value, choices)
self.gbs.Add(cb, (self.row, col), span=(1, span_combo),
flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL|wx.EXPAND|wx.RIGHT,
border=self.charx*2/10)
col += span_combo
btn = wx.Button(self, -1, "..")
btn.quisk_help_text = help_text
btn.quisk_caption = text
h = self.quisk_height + 2
btn.SetSizeHints(h, h, h, h)
self.gbs.Add(btn, (self.row, col), flag=wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, border=self.charx*border)
btn.Bind(wx.EVT_BUTTON, self._BTnHelp)
return txt, cb, btn
def _BTnHelp(self, event):
btn = event.GetEventObject()
dlg = wx.MessageDialog(self, btn.quisk_help_text, btn.quisk_caption, style=wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def OnChange(self, ctrl):
value = ctrl.GetValue()
self.OnChange2(ctrl, value)
def OnChange2(self, ctrl, value):
name = ctrl.quisk_data_name
fmt4 = local_conf.format4name[name][0:4]
if self.FormatOK(value, fmt4):
radio_dict = local_conf.GetRadioDict(self.radio_name)
radio_dict[name] = value
local_conf.settings_changed = True
def FormatOK(self, value, fmt4): # Check formats integer and number
i1 = value.find('#')
try:
if fmt4 == 'inte':
if i1 > 0:
v = int(value[0:i1], base=0)
else:
v = int(value, base=0)
elif fmt4 == 'numb':
if i1 > 0:
v = float(value[0:i1])
else:
v = float(value)
except:
dlg = wx.MessageDialog(None,
"Can not set item with format %s to value %s" % (fmt4, value),
'Change to item', wx.OK|wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return False
else:
return True
def GetValue(self, name, radio_dict):
try:
value = radio_dict[name]
except:
pass
else:
return value
# Value was not in radio_dict. Get it from conf. There are values for platform win_data_name and lin_data_name.
# The win_ and lin_ names are not in conf.
try:
fmt = local_conf.format4name[name]
except:
fmt = '' # not all items in conf are in section_data or receiver_data
try:
if fmt == 'dict': # make a copy for this radio
value = {}
value.update(getattr(conf, name))
elif fmt == 'list': # make a copy for this radio
value = getattr(conf, name)[:]
else:
value = str(getattr(conf, name))
except:
return ''
else:
return value
class Radios(BaseWindow): # The "Radios" first-level page
def __init__(self, parent):
BaseWindow.__init__(self, parent)
self.num_cols = 8
self.radio_name = None
self.cur_radio_text = self.AddTextL(1, 'xx', self.num_cols - 1)
self.SetCurrentRadioText()
self.NextRow()
self.NextRow()
item = self.AddTextL(1, "When Quisk starts, use the radio")
self.start_radio = self.AddComboCtrl(2, 'big_radio_name', choices=[], no_edit=True)
self.start_radio.handler = self.OnChoiceStartup
self.NextRow()
item = self.AddTextL(1, "Add a new radio with the general type")
choices = []
for name, data in local_conf.receiver_data:
choices.append(name)
self.add_type = self.AddComboCtrl(2, '', choices=choices, no_edit=True)
self.add_type.SetSelection(0)
item = self.AddTextL(3, "and name the new radio")
self.add_name = self.AddComboCtrl(4, '', choices=["My Radio", "SR with XVtr", "SoftRock"])
item = self.AddPushButton(5, "Add")
self.Bind(wx.EVT_BUTTON, self.OnBtnAdd, item)
self.NextRow()
item = self.AddTextL(1, "Rename the radio named")
self.rename_old = self.AddComboCtrl(2, 'big_radio_name', choices=[], no_edit=True)
item = self.AddTextL(3, "to the new name")
self.rename_new = self.AddComboCtrl(4, '', choices=["My Radio", "SR with XVtr", "SoftRock"])
item = self.AddPushButton(5, "Rename")
self.Bind(wx.EVT_BUTTON, self.OnBtnRename, item)
self.NextRow()
item = self.AddTextL(1, "Delete the radio named")
self.delete_name = self.AddComboCtrl(2, 'big_radio_name', choices=[], no_edit=True)
item = self.AddPushButton(3, "Delete")
self.Bind(wx.EVT_BUTTON, self.OnBtnDelete, item)
self.NextRow()
item = self.AddTextL(1, "Restart Quisk with new settings")
item = self.AddPushButton(2, "Restart Quisk", 1)
self.Bind(wx.EVT_BUTTON, self.OnBtnRestart, item)
if application.pulse_in_use:
pass #item.Enable(False) # Pulse requires a program exit to clean up
self.NextRow()
self.Fit()
self.SetupScrolling()
self.NewRadioNames()
def SetCurrentRadioText(self):
radio_dict = local_conf.GetRadioDict(self.radio_name)
radio_type = radio_dict['hardware_file_type']
if Settings[1] == "ConfigFileRadio":
text = 'The current radio is ConfigFileRadio, so all settings come from the config file. The hardware type is %s.' % radio_type
else:
text = "Quisk is running with settings from the radio %s. The hardware type is %s." % (Settings[1], radio_type)
self.cur_radio_text.SetLabel(text)
def DuplicateName(self, name):
if name in Settings[2] or name == "ConfigFileRadio":
dlg = wx.MessageDialog(self, "The name already exists. Please choose a different name.",
'Quisk', wx.OK)
dlg.ShowModal()
dlg.Destroy()
return True
return False
def OnBtnAdd(self, event):
name = self.add_name.GetValue().strip()
if not name or self.DuplicateName(name):
return
self.add_name.SetValue('')
typ = self.add_type.GetValue().strip()
if local_conf.AddRadio(name, typ):
if Settings[0] != "Ask me":
Settings[0] = name
self.NewRadioNames()
local_conf.settings_changed = True
def OnBtnRename(self, event):
old = self.rename_old.GetValue()
new = self.rename_new.GetValue().strip()
if not old or not new or self.DuplicateName(new):
return
self.rename_new.SetValue('')
if local_conf.RenameRadio(old, new):
if old == 'ConfigFileRadio' and Settings[1] == "ConfigFileRadio":
Settings[1] = new
elif Settings[1] == old:
Settings[1] = new
self.SetCurrentRadioText()
if Settings[0] != "Ask me":
Settings[0] = new
self.NewRadioNames()
local_conf.settings_changed = True
def OnBtnDelete(self, event):
name = self.delete_name.GetValue()
if not name:
return
dlg = wx.MessageDialog(self,
"Are you sure you want to permanently delete the radio %s?" % name,
'Quisk', wx.OK|wx.CANCEL|wx.ICON_EXCLAMATION)
ret = dlg.ShowModal()
dlg.Destroy()
if ret == wx.ID_OK and local_conf.DeleteRadio(name):
self.NewRadioNames()
local_conf.settings_changed = True
def OnChoiceStartup(self, ctrl):
choice = self.start_radio.GetValue()
if Settings[0] != choice:
Settings[0] = choice
local_conf.settings_changed = True
def NewRadioNames(self): # Correct all choice lists for changed radio names
choices = Settings[2][:] # can rename any available radio
self.rename_old.SetItems(choices)
self.rename_old.SetSelection(0)
if "ConfigFileRadio" in choices:
choices.remove("ConfigFileRadio")
if Settings[1] in choices:
choices.remove(Settings[1])
self.delete_name.SetItems(choices) # can not delete ConfigFileRadio nor the current radio
self.delete_name.SetSelection(0)
choices = Settings[2] + ["Ask me"]
if "ConfigFileRadio" not in choices:
choices.append("ConfigFileRadio")
self.start_radio.SetItems(choices) # can start any radio, plus "Ask me" and "ConfigFileRadio"
try: # Set text in control
index = choices.index(Settings[0]) # last used radio, or new or renamed radio
except:
num = len(Settings[2])
if num == 0:
index = 1
elif num == 1:
index = 0
else:
index = len(choices) - 2
Settings[0] = choices[index]
self.start_radio.SetSelection(index)
def OnBtnRestart(self, event):
application.startup_quisk = True
application.main_frame.OnBtnClose(event)
class RadioSection(BaseWindow): # The pages for each section in the second-level notebook for each radio
def __init__(self, parent, radio_name, section, names):
BaseWindow.__init__(self, parent)
self.radio_name = radio_name
self.names = names
self.num_cols = 8
#self.MarkCols()
self.NextRow(3)
col = 1
radio_dict = local_conf.GetRadioDict(radio_name)
for name, text, fmt, help_text, values in self.names:
if name == 'favorites_file_path':
self.favorites_path = radio_dict.get('favorites_file_path', '')
row = self.row
self.row = 1
item, self.favorites_combo, btn = self.AddTextComboHelp(1, text, self.favorites_path, values, help_text, False, span_text=1, span_combo=4)
self.favorites_combo.handler = self.OnButtonChangeFavorites
item = self.AddPushButtonR(7, "Change..", border=0)
item.Bind(wx.EVT_BUTTON, self.OnButtonChangeFavorites)
self.row = row
else:
if fmt[0:4] in ('dict', 'list'):
continue
if name[0:4] == platform_ignore:
continue
value = self.GetValue(name, radio_dict)
no_edit = "choice" in fmt or fmt == 'boolean'
txt, cb, btn = self.AddTextComboHelp(col, text, value, values, help_text, no_edit)
cb.handler = self.OnChange
cb.quisk_data_name = name
if col == 1:
col = 4
else:
col = 1
self.NextRow()
self.AddColSpacer(2, 20)
self.AddColSpacer(5, 20)
self.Fit()
self.SetupScrolling()
def OnButtonChangeFavorites(self, event):
if isinstance(event, ComboCtrl):
path = event.GetValue()
else:
direc, fname = os.path.split(getattr(conf, 'favorites_file_in_use'))
dlg = wx.FileDialog(None, "Choose Favorites File", direc, fname, "*.txt", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.favorites_combo.SetText(path)
dlg.Destroy()
else:
dlg.Destroy()
return
path = path.strip()
self.favorites_path = path
local_conf.GetRadioDict(self.radio_name)["favorites_file_path"] = path
local_conf.settings_changed = True
class RadioHardware(BaseWindow): # The Hardware page in the second-level notebook for each radio
def __init__(self, parent, radio_name):
BaseWindow.__init__(self, parent)
self.radio_name = radio_name
self.num_cols = 8
#self.MarkCols()
radio_dict = local_conf.GetRadioDict(radio_name)
radio_type = radio_dict['hardware_file_type']
data_names = local_conf.GetReceiverData(radio_type)
bsizer = self.AddBoxSizer(1, self.num_cols - 1)
item = self.AddTextL(-1, "These are the hardware settings for a radio of type %s" % radio_type, self.num_cols-1)
bsizer.Add(item)
self.NextRow(7)
col = 1
border = 2
for name, text, fmt, help_text, values in data_names:
if name == 'hardware_file_name':
self.hware_path = self.GetValue(name, radio_dict)
row = self.row
self.row = 3
item, self.hware_combo, btn = self.AddTextComboHelp(1, text, self.hware_path, values, help_text, False, span_text=1, span_combo=4)
self.hware_combo.handler = self.OnButtonChangeHardware
item = self.AddPushButtonR(7, "Change..", border=0)
item.Bind(wx.EVT_BUTTON, self.OnButtonChangeHardware)
self.row = row
elif name == 'widgets_file_name':
self.widgets_path = self.GetValue(name, radio_dict)
row = self.row
self.row = 5
item, self.widgets_combo, btn = self.AddTextComboHelp(1, text, self.widgets_path, values, help_text, False, span_text=1, span_combo=4)
self.widgets_combo.handler = self.OnButtonChangeWidgets
item = self.AddPushButtonR(7, "Change..", border=0)
item.Bind(wx.EVT_BUTTON, self.OnButtonChangeWidgets)
self.row = row
elif fmt[0:4] in ('dict', 'list'):
pass
elif name[0:4] == platform_ignore:
pass
else:
value = self.GetValue(name, radio_dict)
no_edit = "choice" in fmt or fmt == 'boolean'
txt, cb, btn = self.AddTextComboHelp(col, text, value, values, help_text, no_edit, border=border)
cb.handler = self.OnChange
cb.quisk_data_name = name
if col == 1:
col = 4
border = 0
else:
col = 1
border = 2
self.NextRow()
self.AddColSpacer(2, 20)
self.AddColSpacer(5, 20)
self.Fit()
self.SetupScrolling()
def OnButtonChangeHardware(self, event):
if isinstance(event, ComboCtrl):
path = event.GetValue()
else:
direc, fname = os.path.split(self.hware_path)
dlg = wx.FileDialog(None, "Choose Hardware File", direc, fname, "*.py", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.hware_combo.SetText(path)
dlg.Destroy()
else:
dlg.Destroy()
return
path = path.strip()
self.hware_path = path
local_conf.GetRadioDict(self.radio_name)["hardware_file_name"] = path
local_conf.settings_changed = True
def OnButtonChangeWidgets(self, event):
if isinstance(event, ComboCtrl):
path = event.GetValue()
else:
direc, fname = os.path.split(self.widgets_path)
dlg = wx.FileDialog(None, "Choose Widgets File", direc, fname, "*.py", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.widgets_combo.SetText(path)
dlg.Destroy()
else:
dlg.Destroy()
return
path = path.strip()
self.widgets_path = path
local_conf.GetRadioDict(self.radio_name)["widgets_file_name"] = path
local_conf.settings_changed = True
class RadioSound(BaseWindow): # The Sound page in the second-level notebook for each radio
"""Configure the available sound devices."""
sound_names = ( # same order as grid labels
('playback_rate', '', '', '', 'name_of_sound_play'),
('mic_sample_rate', 'mic_channel_I', 'mic_channel_Q', '', 'microphone_name'),
('sample_rate', 'channel_i', 'channel_q', 'channel_delay', 'name_of_sound_capt'),
('mic_playback_rate', 'mic_play_chan_I', 'mic_play_chan_Q', 'tx_channel_delay', 'name_of_mic_play'),
('', '', '', '', 'digital_input_name'),
('', '', '', '', 'digital_output_name'),
('', '', '', '', 'sample_playback_name'),
('', '', '', '', 'digital_rx1_name'),
)
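# (The rows above correspond, in order, to the grid labels used in __init__:
# Radio Audio Output, Microphone Input, I/Q Sample Input, I/Q Tx Output,
# Digital Input, Digital Output, I/Q Sample Output and Digital Rx1 Output.)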
def __init__(self, parent, radio_name):
BaseWindow.__init__(self, parent)
self.radio_name = radio_name
self.radio_dict = local_conf.GetRadioDict(self.radio_name)
self.num_cols = 8
thename = platform_accept + "latency_millisecs"
for name, text, fmt, help_text, values in local_conf.GetSectionData('Sound'):
if name == thename:
value = self.GetValue(name, self.radio_dict)
no_edit = "choice" in fmt or fmt == 'boolean'
txt, cb, btn = self.AddTextComboHelp(1, text, value, values, help_text, no_edit)
cb.handler = self.OnChange
cb.quisk_data_name = name
break
self.NextRow()
# Add the grid for the sound settings
sizer = wx.GridBagSizer(2, 2)
sizer.SetEmptyCellSize((self.charx, self.charx))
self.gbs.Add(sizer, (self.row, 0), span=(1, self.num_cols))
gbs = self.gbs
self.gbs = sizer
self.row = 1
dev_capt, dev_play = QS.sound_devices()
if sys.platform != 'win32':
for i in range(len(dev_capt)):
dev_capt[i] = "alsa:" + dev_capt[i]
for i in range(len(dev_play)):
dev_play[i] = "alsa:" + dev_play[i]
show = self.GetValue('show_pulse_audio_devices', self.radio_dict)
if show == 'True':
dev_capt.append("pulse # Use the default pulse device")
dev_play.append("pulse # Use the default pulse device")
for n0, n1, n2 in application.pa_dev_capt:
dev_capt.append("pulse:%s" % n0)
for n0, n1, n2 in application.pa_dev_play:
dev_play.append("pulse:%s" % n0)
dev_capt.insert(0, '')
dev_play.insert(0, '')
self.AddTextCHelp(1, "Stream",
"Quisk uses a number of sound devices for both audio and digital data. "
"Radio audio output is the sound going to the headphones or speakers. "
"Microphone input is the monophonic microphone source. Set the channel if the source is stereo. "
"I/Q sample input is the sample source if it comes from a sound device, such as a SoftRock. Otherwise, leave it blank. "
"I/Q Tx output is the transmit sample source from a SoftRock. Otherwise leave it blank. "
"Digital input is the loopback sound device attached to a digital program such as FlDigi. "
"Digital output is the loopback sound device to send Tx samples to a digital program such as FlDigi. "
"I/Q sample output sends the received I/Q data to another program. "
"Digital Rx1 Output is the loopback sound device to send sub-receiver 1 output to another program.")
self.AddTextCHelp(2, "Rate",
"This is the sample rate for the device in Hertz." "Some devices have fixed rates that can not be changed.")
self.AddTextCHelp(3, "Ch I", "This is the in-phase channel for devices with I/Q data, and the main channel for other devices.")
self.AddTextCHelp(4, "Ch Q", "This is the quadrature channel for devices with I/Q data, and the second channel for other devices.")
self.AddTextCHelp(5, "Delay", "Some older devices have a one sample channel delay between channels. "
"This must be corrected for devices with I/Q data. Enter the channel number to delay; either the I or Q channel number. "
"For no delay, leave this blank.")
self.AddTextCHelp(6, "Sound Device", "This is the name of the sound device. For Windows, this is the DirectX name. "
"For Linux you can use the Alsa device, the PortAudio device or the PulseAudio device. "
"The Alsa device are recommended because they have lower latency. See the documentation for more information.")
self.NextRow()
labels = ("Radio Audio Output", "Microphone Input", "I/Q Sample Input", "I/Q Tx Output", "Digital Input", "Digital Output", "I/Q Sample Output", "Digital Rx1 Output")
choices = (("48000", "96000", "192000"), ("0", "1"), ("0", "1"), (" ", "0", "1"))
r = 0
if "SoftRock" in self.radio_dict['hardware_file_type']: # Samples come from sound card
softrock = True
else:
softrock = False
for label in labels:
self.AddTextL(1, label)
# Add col 0
value = self.ItemValue(r, 0)
if value is None:
value = ''
data_name = self.sound_names[r][0]
if r == 0:
cb = self.AddComboCtrl(2, value, choices=("48000", "96000", "192000"), right=True)
if r == 1:
cb = self.AddComboCtrl(2, value, choices=("48000", "8000"), right=True, no_edit=True)
if softrock:
if r == 2:
cb = self.AddComboCtrl(2, value, choices=("48000", "96000", "192000"), right=True)
if r == 3:
cb = self.AddComboCtrl(2, value, choices=("48000", "96000", "192000"), right=True)
else:
if r == 2:
cb = self.AddComboCtrl(2, '', choices=("",), right=True)
cb.Enable(False)
if r == 3:
cb = self.AddComboCtrl(2, '', choices=("",), right=True)
cb.Enable(False)
if r == 4:
cb = self.AddComboCtrl(2, "48000", choices=("48000",), right=True, no_edit=True)
cb.Enable(False)
if r == 5:
cb = self.AddComboCtrl(2, "48000", choices=("48000",), right=True, no_edit=True)
cb.Enable(False)
if r == 6:
cb = self.AddComboCtrl(2, "48000", choices=("48000",), right=True, no_edit=True)
cb.Enable(False)
if r == 7:
cb = self.AddComboCtrl(2, "48000", choices=("48000",), right=True, no_edit=True)
cb.Enable(False)
cb.handler = self.OnChange
cb.quisk_data_name = data_name
# Add col 1, 2, 3
for col in range(1, 4):
value = self.ItemValue(r, col)
data_name = self.sound_names[r][col]
if value is None:
cb = self.AddComboCtrl(col + 2, ' ', choices=[], right=True)
cb.Enable(False)
else:
cb = self.AddComboCtrl(col + 2, value, choices=choices[col], right=True)
cb.handler = self.OnChange
cb.quisk_data_name = self.sound_names[r][col]
# Add col 4
if not softrock and r in (2, 3):
cb = self.AddComboCtrl(6, '', choices=[''])
cb.Enable(False)
elif "Output" in label:
cb = self.AddComboCtrl(6, self.ItemValue(r, 4), choices=dev_play)
else:
cb = self.AddComboCtrl(6, self.ItemValue(r, 4), choices=dev_capt)
cb.handler = self.OnChange
cb.quisk_data_name = platform_accept + self.sound_names[r][4]
self.NextRow()
r += 1
self.gbs = gbs
self.Fit()
self.SetupScrolling()
def ItemValue(self, row, col):
data_name = self.sound_names[row][col]
if col == 4: # Device names
data_name = platform_accept + data_name
value = self.GetValue(data_name, self.radio_dict)
return value
elif data_name:
value = self.GetValue(data_name, self.radio_dict)
if col == 3: # Delay
if value == "-1":
value = ''
return value
return None
def OnChange(self, ctrl):
data_name = ctrl.quisk_data_name
value = ctrl.GetValue()
if data_name in ('channel_delay', 'tx_channel_delay'):
value = value.strip()
if not value:
value = "-1"
self.OnChange2(ctrl, value)
class RadioBands(BaseWindow): # The Bands page in the second-level notebook for each radio
def __init__(self, parent, radio_name):
BaseWindow.__init__(self, parent)
self.radio_name = radio_name
radio_dict = local_conf.GetRadioDict(self.radio_name)
radio_type = radio_dict['hardware_file_type']
self.num_cols = 8
#self.MarkCols()
self.NextRow()
self.AddTextCHelp(1, "Bands",
"This is a list of the bands that Quisk understands. A check mark means that the band button is displayed. A maximum of "
"14 bands may be displayed.")
self.AddTextCHelp(2, " Start MHz",
"This is the start of the band in megahertz.")
self.AddTextCHelp(3, " End MHz",
"This is the end of the band in megahertz.")
heading_row = self.row
self.NextRow()
band_labels = radio_dict['bandLabels'][:]
for i in range(len(band_labels)):
if type(band_labels[i]) in (ListType, TupleType):
band_labels[i] = band_labels[i][0]
band_edge = radio_dict['BandEdge']
# band_list is a list of all known bands
band_list = band_edge.keys()
if local_conf.ReceiverHasName(radio_type, 'tx_level'):
tx_level = self.GetValue('tx_level', radio_dict)
radio_dict['tx_level'] = tx_level # Make sure the dictionary is in radio_dict
for band in tx_level.keys():
if band is None: # Special band None means the default
continue
if band not in band_list:
band_list.append(band)
else:
tx_level = None
try:
transverter_offset = radio_dict['bandTransverterOffset']
except:
transverter_offset = {}
radio_dict['bandTransverterOffset'] = transverter_offset # Make sure the dictionary is in radio_dict
else:
for band in transverter_offset.keys():
if band not in band_list:
band_list.append(band)
try:
hiqsdr_bus = radio_dict['HiQSDR_BandDict']
except:
hiqsdr_bus = None
else:
for band in hiqsdr_bus.keys():
if band not in band_list:
band_list.append(band)
try:
hermes_bus = radio_dict['Hermes_BandDict']
except:
hermes_bus = None
else:
for band in hermes_bus.keys():
if band not in band_list:
band_list.append(band)
band_list.sort(self.SortCmp)
self.band_checks = []
# Add the Audio band
cb = self.AddCheckBox(1, 'Audio', self.OnChangeBands)
self.band_checks.append(cb)
if 'Audio' in band_labels:
cb.SetValue(True)
self.NextRow()
start_row = self.row
# Add check box, start, end
for band in band_list:
cb = self.AddCheckBox(1, band, self.OnChangeBands)
self.band_checks.append(cb)
if band in band_labels:
cb.SetValue(True)
try:
start, end = band_edge[band]
start = str(start * 1E-6)
end = str(end * 1E-6)
except:
start = ''
end = ''
cb = self.AddComboCtrl(2, start, choices=(start, ), right=True)
cb.handler = self.OnChangeBandStart
cb.quisk_band = band
cb = self.AddComboCtrl(3, end, choices=(end, ), right=True)
cb.handler = self.OnChangeBandEnd
cb.quisk_band = band
self.NextRow()
col = 3
# Add tx_level
if tx_level is not None:
col += 1
self.row = heading_row
self.AddTextCHelp(col, " Tx Level",
"This is the transmit level for each band. The level is a number from zero to 255. Changes are immediate.")
self.row = start_row
for band in band_list:
try:
level = tx_level[band]
level = str(level)
except:
try:
level = tx_level[None]
tx_level[band] = level # Fill in tx_level for each band
level = str(level)
except:
tx_level[band] = 0
level = '0'
cb = self.AddComboCtrl(col, level, choices=(level, ), right=True)
cb.handler = self.OnChangeDict
cb.quisk_data_name = 'tx_level'
cb.quisk_band = band
self.NextRow()
# Add transverter offset
if type(transverter_offset) is DictType:
col += 1
self.row = heading_row
self.AddTextCHelp(col, " Transverter Offset",
"If you use a transverter, you need to tune your hardware to a frequency lower than\
the frequency displayed by Quisk. For example, if you have a 2 meter transverter,\
you may need to tune your hardware from 28 to 30 MHz to receive 144 to 146 MHz.\
Enter the transverter offset in Hertz. For this to work, your\
hardware must support it. Currently, the HiQSDR, SDR-IQ and SoftRock are supported.")
self.row = start_row
for band in band_list:
try:
offset = transverter_offset[band]
except:
offset = ''
else:
offset = str(offset)
cb = self.AddComboCtrl(col, offset, choices=(offset, ), right=True)
cb.handler = self.OnChangeDictBlank
cb.quisk_data_name = 'bandTransverterOffset'
cb.quisk_band = band
self.NextRow()
# Add hiqsdr_bus
if hiqsdr_bus is not None:
col += 1
self.row = heading_row
self.AddTextCHelp(col, " IO Bus",
"This is the value to set on the IO bus for each band. It may be used to select filters.")
self.row = start_row
for band in band_list:
try:
bus = hiqsdr_bus[band]
except:
bus = ''
bus_choice = ('11', )
else:
bus = str(bus)
bus_choice = (bus, )
cb = self.AddComboCtrl(col, bus, bus_choice, right=True)
cb.handler = self.OnChangeDict
cb.quisk_data_name = 'HiQSDR_BandDict'
cb.quisk_band = band
self.NextRow()
# Add hermes_bus
if hermes_bus is not None:
col += 1
self.row = heading_row
self.AddTextCHelp(col, " IO Bus",
"This is the value to set on the IO bus for each band. It may be used to select filters.")
self.row = start_row
for band in band_list:
try:
bus = hermes_bus[band]
except:
bus = ''
bus_choice = ('11', '0x0B', '0b00001011')
else:
#b1 = "0b%b" % bus
b1 = "0x%X" % bus
b2 = str(bus)
bus_choice = (b1, b2, '0b00000001')
bus = b1
cb = self.AddComboCtrl(col, bus, bus_choice, right=True)
cb.handler = self.OnChangeDict
cb.quisk_data_name = 'Hermes_BandDict'
cb.quisk_band = band
self.NextRow()
# Add the Time band
cb = self.AddCheckBox(1, 'Time', self.OnChangeBands)
self.band_checks.append(cb)
if 'Time' in band_labels:
cb.SetValue(True)
self.NextRow()
self.Fit()
self.SetupScrolling()
def SortCmp(self, item1, item2):
# Numerical conversion to wavelength
if item1[-2:] == 'cm':
item1 = float(item1[0:-2]) * .01
elif item1[-1] == 'k':
item1 = 300.0 / (float(item1[0:-1]) * .001)
else:
try:
item1 = float(item1)
except:
item1 = 1.0
if item2[-2:] == 'cm':
item2 = float(item2[0:-2]) * .01
elif item2[-1] == 'k':
item2 = 300.0 / (float(item2[0:-1]) * .001)
else:
try:
item2 = float(item2)
except:
item2 = 1.0
if item1 > item2:
return -1
elif item1 == item2:
return 0
else:
return +1
def OnChangeBands(self, ctrl):
band_list = []
count = 0
for cb in self.band_checks:
if cb.IsChecked():
band = cb.GetLabel()
count += 1
if band == '60' and len(conf.freq60) > 1:
band_list.append(('60', ) * len(conf.freq60))
elif band == 'Time' and len(conf.bandTime) > 1:
band_list.append(('Time', ) * len(conf.bandTime))
else:
band_list.append(band)
if count > 14:
dlg = wx.MessageDialog(None,
"There are more than the maximum of 14 bands checked. Please remove some checks.",
'List of Bands', wx.OK|wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
else:
radio_dict = local_conf.GetRadioDict(self.radio_name)
radio_dict['bandLabels'] = band_list
local_conf.settings_changed = True
def OnChangeBandStart(self, ctrl):
radio_dict = local_conf.GetRadioDict(self.radio_name)
band_edge = radio_dict['BandEdge']
band = ctrl.quisk_band
start, end = band_edge.get(band, (0, 9999))
value = ctrl.GetValue()
if self.FormatOK(value, 'numb'):
start = int(float(value) * 1E6 + 0.1)
band_edge[band] = (start, end)
local_conf.settings_changed = True
def OnChangeBandEnd(self, ctrl):
radio_dict = local_conf.GetRadioDict(self.radio_name)
band_edge = radio_dict['BandEdge']
band = ctrl.quisk_band
start, end = band_edge.get(band, (0, 9999))
value = ctrl.GetValue()
if self.FormatOK(value, 'numb'):
end = int(float(value) * 1E6 + 0.1)
band_edge[band] = (start, end)
local_conf.settings_changed = True
def OnChangeDict(self, ctrl):
radio_dict = local_conf.GetRadioDict(self.radio_name)
dct = radio_dict[ctrl.quisk_data_name]
band = ctrl.quisk_band
value = ctrl.GetValue()
if self.FormatOK(value, 'inte'):
value = int(value)
dct[band] = value
local_conf.settings_changed = True
if ctrl.quisk_data_name == 'tx_level' and hasattr(application.Hardware, "SetTxLevel"):
application.Hardware.SetTxLevel()
def OnChangeDictBlank(self, ctrl):
radio_dict = local_conf.GetRadioDict(self.radio_name)
dct = radio_dict[ctrl.quisk_data_name]
band = ctrl.quisk_band
value = ctrl.GetValue()
value = value.strip()
if not value:
if dct.has_key(band):
del dct[band]
local_conf.settings_changed = True
elif self.FormatOK(value, 'inte'):
value = int(value)
dct[band] = value
local_conf.settings_changed = True
```
#### File: SdrCwXcvr/Quisk/dxcluster.py
```python
import threading
import time
import telnetlib
import sys
import quisk_conf_defaults as conf
class DxEntry():
def __init__(self):
self.info = []
def getFreq(self):
return self.freq
def getDX(self):
return self.dx
def getSpotter(self, index):
return self.info[index][0]
def getTime(self, index):
return self.info[index][1]
def setTime(self, index, value):
L1 = list(self.info)
L2 = list(L1[index])
L2[1] = value
L1[index] = tuple(L2)
self.info = tuple(L1)
return
def getLocation(self, index):
return self.info[index][2]
def getComment(self, index):
return self.info[index][3]
def getLen(self):
return len(self.info)
def equal(self, element):
if element.getDX() == self.dx:
return True
else:
return False
def join (self, element):
for i in range (0, len(element.info)):
self.info.insert(0, element.info[i])
length = len(self.info)
# limit to max history
if length > 3:
del (self.info[length-1])
self.timestamp = max (self.timestamp, element.timestamp)
def isExpired(self):
#print(time.time()-self.timestamp)
#if time.time()-self.timestamp > conf.dxClExpireTime * 60:
# print("DELETE ENTRY")
return time.time()-self.timestamp > conf.dxClExpireTime * 60
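# parseMessage() expects standard DX cluster spot lines; an illustrative (made
# up) example of the format it parses:
#   DX de W1AW:     14025.0  DL1ABC   CW 599 up 1               1823Z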
def parseMessage(self, message):
words = message.split()
sTime = ''
locator = ''
comment = ''
if len(words) > 4 and words[0].lower() == 'dx' and words[1].lower() == 'de':
spotter = words[2].strip(':')
self.freq = int(float(words[3])*1000)
self.dx = words[4]
locator = self.dx
for index in range (5, len(words)):
word = words[index]
# print(index, word)
try:
if index < len(words)-1:
if comment != '':
comment += ' '
comment += word
# if sTime != '':
# locator = word.strip('\07')
#search time
if word[0:3].isdigit() and word[4].isalpha():
sTime = word.strip('\07')
sTime = sTime[0:2]+':'+sTime[2:4]+ ' UTC'
# if sTime == '':
# print(word)
# if comment != '':
# comment += ' '
# comment += word
except:
pass
self.info.insert(0, (spotter, sTime, locator, comment))
self.timestamp = time.time()
# print(self.dx, self.freq, spotter, sTime, locator, comment)
return True
return False
class DxCluster(threading.Thread):
def __init__(self, dxClHost, dxClPort, user_call_sign, dxClPassword, dxClFltrCmd ):
self.do_init = 1
threading.Thread.__init__(self)
self.doQuit = threading.Event()
self.dxSpots = []
self.doQuit.clear()
self.dxClHost = dxClHost
self.dxClPort = dxClPort
self.user_call_sign = user_call_sign
self.dxClFltrCmd = dxClFltrCmd
try:
if not (conf.TelnetTalk == None):
self.TelnetTalk = conf.TelnetTalk
except:
self.TelnetTalk = True
def run(self):
self.telnetInit()
if self.telnetConnect():
if not self.dxClFltrCmd =='':
self.tn.write(str(self.dxClFltrCmd) + "\n")
if(self.TelnetTalk): print(str(self.dxClFltrCmd + "\n"))
while not self.doQuit.isSet():
try:
self.telnetRead()
except:
self.tn.close()
time.sleep(20)
if not self.doQuit.isSet():
self.telnetConnect()
self.tn.close()
def setListener (self, listener):
self.listener = listener
def telnetInit(self):
self.tn = telnetlib.Telnet()
def telnetConnect(self):
#if(self.TelnetTalk): self.tn.set_debuglevel(3)
HstPrt = (str(self.dxClHost), str(self.dxClPort))
try:
self.tn.open(self.dxClHost, self.dxClPort, 10)
if(self.TelnetTalk): print('Connected to: %s; Port: %s\n' %HstPrt)
try:
self.tn.read_until('login:', 10)
self.tn.write(str(self.user_call_sign) + "\n")
# user_call_sign may be Unicode
if conf.dxClPassword:
self.tn.read_until("Password: ")
self.tn.write(str(self.dxClPassword) + "\n")
return True
except Exception:
print("DX Cluster Connection error: {}:{}".format(self.dxClHost, self.dxClPort))
return False
except Exception:
print("DX Cluster Telnet.Open() error: {}:{}".format(self.dxClHost, self.dxClPort))
return False
def telnetRead(self):
message = self.tn.read_until('\n', 60).decode(encoding='utf-8', errors='replace')
if self.doQuit.isSet() == False:
dxEntry = DxEntry();
if dxEntry.parseMessage(message):
if(self.TelnetTalk): print(message)
for i, listElement in enumerate(self.dxSpots):
if (listElement.equal(dxEntry)):
listElement.join (dxEntry)
return
if listElement.isExpired():
del (self.dxSpots[i])
self.dxSpots.append(dxEntry)
if self.listener:
self.listener()
def getHost(self):
return self.tn.host + ':' + str(self.tn.port)
def stop(self):
self.doQuit.set()
```
#### File: Quisk/hermes/quisk_widgets.py
```python
from __future__ import print_function
import wx
class BottomWidgets: # Add extra widgets to the bottom of the screen
def __init__(self, app, hardware, conf, frame, gbs, vertBox):
self.config = conf
self.hardware = hardware
self.application = app
self.start_row = app.widget_row # The first available row
self.start_col = app.button_start_col # The start of the button columns
if hardware.hermes_board_id == 0x06: # Hermes-Lite
self.Widgets_0x06(app, hardware, conf, frame, gbs, vertBox)
else:
self.Widgets_dflt(app, hardware, conf, frame, gbs, vertBox)
def Widgets_0x06(self, app, hardware, conf, frame, gbs, vertBox):
b = app.QuiskCheckbutton(frame, self.OnAGC, 'RfAgc')
gbs.Add(b, (self.start_row, self.start_col), (1, 2), flag=wx.EXPAND)
init = 10
sl = app.SliderBoxHH(frame, 'RfLna %d dB', init, -12, 48, self.OnLNA, True)
hardware.ChangeLNA(init)
gbs.Add(sl, (self.start_row, self.start_col + 2), (1, 8), flag=wx.EXPAND)
self.num_rows_added = 1
def Widgets_dflt(self, app, hardware, conf, frame, gbs, vertBox):
pass
def OnAGC(self, event):
btn = event.GetEventObject()
value = btn.GetValue()
self.hardware.ChangeAGC(value)
def OnLNA(self, event):
sl = event.GetEventObject()
value = sl.GetValue()
self.hardware.ChangeLNA(value)
```
#### File: Quisk/hiqsdr/quisk_hardware.py
```python
from __future__ import print_function
import struct, socket, math, traceback
import _quisk as QS
from quisk_hardware_model import Hardware as BaseHardware
DEBUG = 0
class Hardware(BaseHardware):
def __init__(self, app, conf):
BaseHardware.__init__(self, app, conf)
self.got_udp_status = '' # status from UDP receiver
# want_udp_status is a 14-byte string with numbers in little-endian order:
# [0:2] 'St'
# [2:6] Rx tune phase
# [6:10] Tx tune phase
# [10] Tx output level 0 to 255
# [11] Tx control bits:
# 0x01 Enable CW transmit
# 0x02 Enable all other transmit
# 0x04 Use the HiQSDR extended IO pins not present in the 2010 QEX ver 1.0
# 0x08 The key is down (software key)
# bits 5 and 4: Transmit sample rate
# 0b00 48k
# 0b01 192k
# 0b10 480k
# 0b11 8k
# 0x40 odyssey: Spot button is in use
# 0x80 odyssey: Mic Boost 20dB
# [12] Rx control bits
# bits 5 through 0
# Second stage decimation less one, 1-39, six bits
# bits 7, 6
# 0b00 Prescaler 8, 3-byte samples I and Q; 1440 / 6 = 240 samples per UDP packet
# 0b01 Prescaler 2, 2-byte samples
# 0b10 Prescaler 40, 3-byte samples
# 0b11 Prescaler 2, 1-byte samples
# [13] zero or firmware version number
# The above is used for firmware version 1.0.
# Version 1.1 adds eight more bytes for the HiQSDR control ports:
# [14] X1 connector: Preselect pins 69, 68, 65, 64; Preamp pin 63, Tx LED pin 57
# [15] Attenuator pins 84, 83, 82, 81, 80
# [16] More bits: AntSwitch pin 41 is 0x01
# [17:22] The remaining five bytes are sent as zero.
# Version 1.2 uses the same format as 1.1, but adds the "Qs" command (see below).
# Version 1.3 adds features needed by the new quisk_vna.py program:
# [17] The sidetone volume 0 to 255
# [18:20] This is vna_count, the number of VNA data points; or zero for normal operation
# [20] The CW delay as specified in the config file
# [21] Control bits:
# 0x01 Switch on tx mirror on rx for adaptive predistortion
# [22:24] Noise blanker level
# The "Qs" command is a two-byte UDP packet sent to the control port. It returns the hardware status
# as the above string, except that the string starts with "Qs" instead of "St". Do not send the "Qs" command
# from Quisk, as it interferes with the "St" command. The "Qs" command is meant to be used from an
# external program, such as HamLib or a logging program.
# When vna_count != 0, we are in VNA mode. The start frequency is rx_phase, and for each point tx_phase is added
# to advance the frequency. A zero sample is added to mark the blocks. The samples are I and Q averaged at DC.
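# For reference only: a version 1.0 status packet of the layout described above
# could be packed with the standard struct module roughly as
#   struct.pack('<2sIIBBBB', 'St', rx_phase, tx_phase, tx_level, tx_control, rx_control, 0)
# giving the 14 little-endian bytes listed. This is an illustration of the byte
# layout; the actual packet is assembled elsewhere in this class.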
self.rx_phase = 0
self.tx_phase = 0
self.tx_level = 0
self.tx_control = 0
self.rx_control = 0
QS.set_sample_bytes(3)
self.vna_count = 0 # VNA scan count; MUST be zero for non-VNA operation
self.cw_delay = conf.cw_delay
self.index = 0
self.mode = None
self.usingSpot = False
self.band = None
self.rf_gain = 0
self.sidetone_volume = 0 # sidetone volume 0 to 255
self.repeater_freq = None # original repeater output frequency
self.HiQSDR_Connector_X1 = 0
self.HiQSDR_Attenuator = 0
self.HiQSDR_Bits = 0
try:
if conf.radio_sound_mic_boost:
self.tx_control = 0x80
except:
pass
if conf.use_rx_udp == 2: # Set to 2 for the HiQSDR
self.rf_gain_labels = ('RF 0 dB', 'RF +10', 'RF -10', 'RF -20', 'RF -30')
self.antenna_labels = ('Ant 1', 'Ant 2')
self.firmware_version = None # firmware version is initially unknown
self.rx_udp_socket = None
self.vfo_frequency = 0 # current vfo frequency
self.tx_frequency = 0
self.decimations = [] # supported decimation rates
for dec in (40, 20, 10, 8, 5, 4, 2):
self.decimations.append(dec * 64)
self.decimations.append(80)
self.decimations.append(64)
if self.conf.fft_size_multiplier == 0:
self.conf.fft_size_multiplier = 6 # Set size needed by VarDecim
def open(self):
# Create the proper broadcast address for rx_udp_ip.
nm = self.conf.rx_udp_ip_netmask.split('.')
ip = self.conf.rx_udp_ip.split('.')
nm = map(int, nm)
ip = map(int, ip)
bc = ''
for i in range(4):
x = (ip[i] | ~ nm[i]) & 0xFF
bc = bc + str(x) + '.'
self.broadcast_addr = bc[:-1]
# This socket is used for the Simple Network Discovery Protocol by AE4JY
self.socket_sndp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket_sndp.setblocking(0)
self.socket_sndp.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.sndp_request = chr(56) + chr(0) + chr(0x5A) + chr(0xA5) + chr(0) * 52
self.sndp_active = self.conf.sndp_active
# conf.rx_udp_port is used for returning ADC samples
# conf.rx_udp_port + 1 is used for control
self.rx_udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.rx_udp_socket.setblocking(0)
self.rx_udp_socket.connect((self.conf.rx_udp_ip, self.conf.rx_udp_port + 1))
return QS.open_rx_udp(self.conf.rx_udp_ip, self.conf.rx_udp_port)
def close(self):
if self.rx_udp_socket:
self.rx_udp_socket.close()
self.rx_udp_socket = None
def ReturnFrequency(self): # Return the current tuning and VFO frequency
return None, None # frequencies have not changed
def ReturnVfoFloat(self, freq=None): # Return the accurate VFO as a float
if freq is None:
rx_phase = self.rx_phase
else:
rx_phase = int(float(freq) / self.conf.rx_udp_clock * 2.0**32 + 0.5) & 0xFFFFFFFF
return float(rx_phase) * self.conf.rx_udp_clock / 2.0**32
def ChangeFrequency(self, tx_freq, vfo_freq, source='', band='', event=None):
if vfo_freq != self.vfo_frequency:
self.vfo_frequency = vfo_freq
self.rx_phase = int(float(vfo_freq - self.transverter_offset) / self.conf.rx_udp_clock * 2.0**32 + 0.5) & 0xFFFFFFFF
if tx_freq and tx_freq > 0:
self.tx_frequency = tx_freq
self.tx_phase = int(float(tx_freq - self.transverter_offset) / self.conf.rx_udp_clock * 2.0**32 + 0.5) & 0xFFFFFFFF
self.NewUdpStatus()
return tx_freq, vfo_freq
def RepeaterOffset(self, offset=None): # Change frequency for repeater offset during Tx
if offset is None: # Return True if frequency change is complete
self.HeartBeat()
return self.want_udp_status == self.got_udp_status
if offset == 0: # Change back to the original frequency
if self.repeater_freq is None: # Frequency was already reset
return self.want_udp_status == self.got_udp_status
self.tx_frequency = self.repeater_freq
self.repeater_freq = None
else: # Shift to repeater input frequency
self.repeater_freq = self.tx_frequency
offset = int(offset * 1000) # Convert kHz to Hz
self.tx_frequency += offset
self.tx_phase = int(float(self.tx_frequency - self.transverter_offset) / self.conf.rx_udp_clock * 2.0**32 + 0.5) & 0xFFFFFFFF
self.NewUdpStatus(True)
return False
def ChangeMode(self, mode):
# mode is a string: "USB", "AM", etc.
self.mode = mode
self.tx_control &= ~0x03 # Erase last two bits
if self.vna_count:
pass
elif self.usingSpot:
self.tx_control |= 0x02
elif mode in ("CWL", "CWU"):
self.tx_control |= 0x01
else:
self.tx_control |= 0x02
self.SetTxLevel()
def ChangeBand(self, band):
# band is a string: "60", "40", "WWV", etc.
BaseHardware.ChangeBand(self, band)
self.band = band
self.HiQSDR_Connector_X1 &= ~0x0F	# Clear the last four bits
self.HiQSDR_Connector_X1 |= self.conf.HiQSDR_BandDict.get(band, 0) & 0x0F
self.SetTxLevel()
def SetTxLevel(self):
# As tx_level varies from 50 to 200, the output level changes from 263 to 752 mV
# So 0 to 255 is 100 to 931, or 1.0 to 9.31; v = 1.0 + 0.0326 * level
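# Worked example (illustrative): tx_level = 127 gives v = 1.0 + 0.0326 * 127 = 5.14;
# a 50% power setting scales the amplitude by sqrt(0.5) = 0.707, so v drops to about
# 3.63 and the level written back is int((3.63 - 1.0) / 0.0326 + 0.5) = 81.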
if not self.vna_count:
try:
self.tx_level = self.conf.tx_level[self.band]
except KeyError:
self.tx_level = self.conf.tx_level.get(None, 127) # The default
if self.mode[0:3] in ('DGT', 'FDV'): # Digital modes; change power by a percentage
reduc = self.application.digital_tx_level
else:
reduc = self.application.tx_level
level = 1.0 + self.tx_level * 0.0326
level *= math.sqrt(reduc / 100.0) # Convert from a power to an amplitude
self.tx_level = int((level - 1.0) / 0.0326 + 0.5)
if self.tx_level < 0:
self.tx_level = 0
elif self.tx_level > 255:
self.tx_level = 255
self.NewUdpStatus()
def OnButtonRfGain(self, event):
# The HiQSDR attenuator is five bits: 2, 4, 8, 10, 20 dB
btn = event.GetEventObject()
n = btn.index
self.HiQSDR_Connector_X1 &= ~0x10	# Clear the preamp bit
if n == 0: # 0dB
self.HiQSDR_Attenuator = 0
self.rf_gain = 0
elif n == 1: # +10
self.HiQSDR_Attenuator = 0
self.HiQSDR_Connector_X1 |= 0x10
self.rf_gain = 10
elif n == 2: # -10
self.HiQSDR_Attenuator = 0x08
self.rf_gain = -10
elif n == 3: # -20
self.HiQSDR_Attenuator = 0x10
self.rf_gain = -20
elif n == 4: # -30
self.HiQSDR_Attenuator = 0x18
self.rf_gain = -30
else:
self.HiQSDR_Attenuator = 0
self.rf_gain = 0
print ('Unknown RfGain')
self.NewUdpStatus()
def OnButtonPTT(self, event):
# This feature requires firmware version 1.1 or higher
if self.firmware_version:
btn = event.GetEventObject()
if btn.GetValue(): # Turn the software key bit on or off
self.tx_control |= 0x08
else:
self.tx_control &= ~0x08
self.NewUdpStatus(True) # Prompt update for PTT
def OnButtonAntenna(self, event):
# This feature requires extended IO
btn = event.GetEventObject()
if btn.index:
self.HiQSDR_Bits |= 0x01
else:
self.HiQSDR_Bits &= ~0x01
self.NewUdpStatus()
def ChangeSidetone(self, value): # The sidetone volume changed
self.sidetone_volume = int(value * 255.1) # Change 0.0-1.0 to 0-255
self.NewUdpStatus()
def HeartBeat(self):
if self.sndp_active: # AE4JY Simple Network Discovery Protocol - attempt to set the FPGA IP address
try:
self.socket_sndp.sendto(self.sndp_request, (self.broadcast_addr, 48321))
data = self.socket_sndp.recv(1024)
# print(repr(data))
except:
# traceback.print_exc()
pass
else:
if len(data) == 56 and data[5:14] == 'HiQSDR-v1':
ip = self.conf.rx_udp_ip.split('.')
t = (data[0:4] + chr(2) + data[5:37] + chr(int(ip[3])) + chr(int(ip[2])) + chr(int(ip[1])) + chr(int(ip[0]))
+ chr(0) * 12 + chr(self.conf.rx_udp_port & 0xFF) + chr(self.conf.rx_udp_port >> 8) + chr(0))
# print(repr(t))
self.socket_sndp.sendto(t, (self.broadcast_addr, 48321))
try: # receive the old status if any
data = self.rx_udp_socket.recv(1024)
if DEBUG:
self.PrintStatus(' got ', data)
except:
pass
else:
if data[0:2] == 'St':
self.got_udp_status = data
if self.firmware_version is None: # get the firmware version
if self.want_udp_status[0:13] != self.got_udp_status[0:13]:
try:
self.rx_udp_socket.send(self.want_udp_status)
if DEBUG:
self.PrintStatus('Start', self.want_udp_status)
except:
pass
else: # We got a correct response.
self.firmware_version = ord(self.got_udp_status[13]) # Firmware version is returned here
if DEBUG:
print ('Got version', self.firmware_version)
if self.firmware_version > 0 and self.conf.use_rx_udp == 2:
self.tx_control |= 0x04 # Use extra control bytes
self.sndp_active = False
self.NewUdpStatus()
else:
if self.want_udp_status != self.got_udp_status:
if DEBUG:
self.PrintStatus('Have ', self.got_udp_status)
self.PrintStatus(' send', self.want_udp_status)
try:
self.rx_udp_socket.send(self.want_udp_status)
except:
pass
elif DEBUG:
self.rx_udp_socket.send('Qs')
def PrintStatus(self, msg, string):
print (msg, ' ', end=' ')
print (string[0:2], end=' ')
for c in string[2:]:
print ("%2X" % ord(c), end=' ')
print ()
def GetFirmwareVersion(self):
return self.firmware_version
def OnSpot(self, level):
# level is -1 for Spot button Off; else the Spot level 0 to 1000.
# The Spot button sets the mode to SSB-equivalent for CW so that the Spot level works.
if level >= 0 and not self.usingSpot: # Spot was turned on
self.usingSpot = True
self.tx_control |= 0x40
self.ChangeMode(self.mode)
elif level < 0 and self.usingSpot: # Spot was turned off
self.usingSpot = False
self.tx_control &= ~0x40
self.ChangeMode(self.mode)
def OnBtnFDX(self, is_fdx): # Status of FDX button, 0 or 1
if is_fdx:
self.HiQSDR_Connector_X1 |= 0x20 # Mask in the FDX bit
else:
self.HiQSDR_Connector_X1 &= ~0x20
self.NewUdpStatus()
def VarDecimGetChoices(self): # return text labels for the control
clock = self.conf.rx_udp_clock
l = [] # a list of sample rates
for dec in self.decimations:
l.append(str(int(float(clock) / dec / 1e3 + 0.5)))
return l
def VarDecimGetLabel(self): # return a text label for the control
return "Sample rate ksps"
def VarDecimGetIndex(self): # return the current index
return self.index
def VarDecimSet(self, index=None): # set decimation, return sample rate
if index is None: # initial call to set decimation before the call to open()
rate = self.application.vardecim_set # May be None or from different hardware
try:
dec = int(float(self.conf.rx_udp_clock // rate + 0.5))
self.index = self.decimations.index(dec)
except:
try:
self.index = self.decimations.index(self.conf.rx_udp_decimation)
except:
self.index = 0
else:
self.index = index
dec = self.decimations[self.index]
if dec >= 128:
self.rx_control = dec // 64 - 1 # Second stage decimation less one
QS.set_sample_bytes(3)
else:
self.rx_control = dec // 16 - 1 # Second stage decimation less one
self.rx_control |= 0b01000000 # Change prescaler to 2 (instead of 8)
QS.set_sample_bytes(2)
self.NewUdpStatus()
return int(float(self.conf.rx_udp_clock) / dec + 0.5)
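# Example (assuming a 122.88 MHz rx_udp_clock): dec = 40 * 64 = 2560 takes the first
# branch, giving rx_control = 2560 // 64 - 1 = 39 with 3-byte samples and a rate of
# 122.88e6 / 2560 = 48000 sps; dec = 80 takes the second branch, giving
# rx_control = 80 // 16 - 1 = 4 with the prescaler-2 bit set and 2-byte samples.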
def VarDecimRange(self):
return (48000, 960000)
def NewUdpStatus(self, do_tx=False):
s = "St"
s = s + struct.pack("<L", self.rx_phase)
s = s + struct.pack("<L", self.tx_phase)
s = s + chr(self.tx_level) + chr(self.tx_control)
s = s + chr(self.rx_control)
if self.firmware_version: # Add the version
s = s + chr(self.firmware_version) # The firmware version will be returned
if self.tx_control & 0x04: # Use extra HiQSDR control bytes
s = s + chr(self.HiQSDR_Connector_X1)
s = s + chr(self.HiQSDR_Attenuator)
s = s + chr(self.HiQSDR_Bits)
else:
s = s + chr(0) * 3
s = s + chr(self.sidetone_volume)
s = s + struct.pack("<H", self.vna_count)
s = s + chr(self.cw_delay)
s = s + chr(0)
else: # firmware version 0 or None
s = s + chr(0) # assume version 0
self.want_udp_status = s
if do_tx:
try:
self.rx_udp_socket.send(s)
except:
pass
def SetVNA(self, key_down=None, vna_start=None, vna_stop=None, vna_count=None, do_tx=False):
if key_down is None:
pass
elif key_down:
self.tx_control |= 0x08
else:
self.tx_control &= ~0x08
if vna_count is not None:
self.vna_count = vna_count # Number of scan points
if vna_start is not None: # Set the start and stop frequencies. The tx_phase is the frequency delta.
self.rx_phase = int(float(vna_start) / self.conf.rx_udp_clock * 2.0**32 + 0.5) & 0xFFFFFFFF
self.tx_phase = int(float(vna_stop - vna_start) / (self.vna_count - 1) / self.conf.rx_udp_clock * 2.0**32 + 0.5) & 0xFFFFFFFF
self.tx_control &= ~0x03 # Erase last two bits
self.rx_control = 40 - 1
self.tx_level = 255
self.NewUdpStatus(do_tx)
start = int(float(self.rx_phase) * self.conf.rx_udp_clock / 2.0**32 + 0.5)
phase = self.rx_phase + self.tx_phase * (self.vna_count - 1)
stop = int(float(phase) * self.conf.rx_udp_clock / 2.0**32 + 0.5)
return start, stop # return the start and stop frequencies after integer rounding
```
#### File: Quisk/n2adr/scanner_widgets.py
```python
from __future__ import print_function
import wx, time
import _quisk as QS
class BottomWidgets: # Add extra widgets to the bottom of the screen
def __init__(self, app, hardware, conf, frame, gbs, vertBox):
self.config = conf
self.hardware = hardware
self.application = app
row = 4 # The next available row
b = app.QuiskPushbutton(frame, None, 'Tune')
bw, bh = b.GetMinSize()
b.Enable(0)
gbs.Add(b, (row, 0), (1, 2), flag=wx.EXPAND)
b = app.QuiskPushbutton(frame, None, '')
gbs.Add(b, (row, 2), (1, 2), flag=wx.EXPAND)
b = app.QuiskPushbutton(frame, None, '')
gbs.Add(b, (row, 4), (1, 2), flag=wx.EXPAND)
b = self.btnScanner = app.QuiskCheckbutton(frame, self.OnBtnScanner, text='Scanner', use_right=True)
self.scan_timer = wx.Timer(b) # timed events for the scanner
b.Bind(wx.EVT_TIMER, self.OnTimerEvent)
gbs.Add(b, (row, 6), (1, 2), flag=wx.EXPAND)
b = self.btnNext = app.QuiskPushbutton(frame, self.OnBtnNext, 'Next', True)
gbs.Add(b, (row, 8), (1, 2), flag=wx.EXPAND)
b = app.QuiskCheckbutton(frame, self.OnBtnRptr, text='Rptr')
b.SetValue(True, True)
gbs.Add(b, (row, 10), (1, 2), flag=wx.EXPAND)
self.swr_label = app.QuiskText(frame, 'Watts 000 SWR 10.1 Zh Ind 22 Cap 33 Freq 28100 (7777)', bh)
gbs.Add(self.swr_label, (row, 15), (1, 12), flag=wx.EXPAND)
# Example of a horizontal slider:
# lab = wx.StaticText(frame, -1, 'Preamp', style=wx.ALIGN_CENTER)
# gbs.Add(lab, (5,0), flag=wx.EXPAND)
# sl = wx.Slider(frame, -1, 1024, 0, 2048) # parent, -1, initial, min, max
# gbs.Add(sl, (5,1), (1, 5), flag=wx.EXPAND)
# sl.Bind(wx.EVT_SCROLL, self.OnPreamp)
# def OnPreamp(self, event):
# print event.GetPosition()
def UpdateText(self, text):
self.swr_label.SetLabel(text)
def OnBtnRptr(self, event):
btn = event.GetEventObject()
if btn.GetValue():
self.config.freq_spacing = 5000
else:
self.config.freq_spacing = 0
def OnBtnNext(self, event):
self.direction = self.btnNext.direction # +1 for left -> go up; -1 for down
self.keep_going = wx.GetKeyState(wx.WXK_SHIFT) # if Shift is down, move to next band
self.scanner = False
if self.keep_going:
if not self.ScanScreen(event):
self.MoveVfo(event)
self.scan_timer.Start(500)
else:
self.ScanScreen(event)
def ScanScreen(self, event): # Look for signals on the current screen
lst = self.hardware.rpt_freq_list
app = self.application
vfo = app.VFO
tx_freq = vfo + app.txFreq
sample_rate = app.sample_rate
limit = int(sample_rate / 2.0 * self.config.display_fraction * 0.95) # edge of screen
self.scan_n1 = None
self.scan_n = None
for n in range(len(lst)):
if lst[n] > vfo - limit and self.scan_n1 is None:
self.scan_n1 = n # inclusive
if lst[n] >= tx_freq and self.scan_n is None:
self.scan_n = n
if lst[n] > vfo + limit:
break
self.scan_n2 = n # inclusive
if self.scan_n is None:
self.scan_n = self.scan_n1
if self.direction > 0: # left click; go up
seq = range(self.scan_n + 1, self.scan_n2 + 1)
if not self.keep_going:
seq += range(self.scan_n1, self.scan_n)
else: # right click; go down
seq = range(self.scan_n - 1, self.scan_n1 - 1, -1)
if not self.keep_going:
seq += range(self.scan_n2, self.scan_n, -1)
for n in seq:
freq = lst[n]
if not QS.get_squelch(freq - vfo):
app.ChangeHwFrequency(freq - vfo, vfo, 'Repeater', event)
return True # frequency was changed
return False # frequency was not changed
def MoveVfo(self, event): # Move the VFO to look for further signals
lst = self.hardware.rpt_freq_list
app = self.application
vfo = app.VFO
tx_freq = vfo + app.txFreq
sample_rate = app.sample_rate
if self.direction > 0: # left click; go up
n = self.scan_n2 + 1
if n >= len(lst):
n = 0
freq = lst[n]
vfo = freq + sample_rate * 4 / 10
app.ChangeHwFrequency(freq - vfo, vfo, 'Repeater', event)
else: # right click; go down
n = self.scan_n1 - 1
if n < 0:
n = len(lst) - 1
freq = lst[n]
vfo = freq - sample_rate * 4 / 10
app.ChangeHwFrequency(freq - vfo, vfo, 'Repeater', event)
def OnBtnScanner(self, event):
self.direction = self.btnScanner.direction # +1 for left -> go up; -1 for down
self.keep_going = wx.GetKeyState(wx.WXK_SHIFT) # if Shift is down, move to next band
self.scanner = True
if self.btnScanner.GetValue():
self.btnNext.Enable(0)
if self.keep_going:
if not self.ScanScreen(event):
self.MoveVfo(event)
else:
self.ScanScreen(event)
self.scan_timer.Start(500)
else:
self.btnNext.Enable(1)
self.scan_timer.Stop()
def OnTimerEvent(self, event):
if QS.get_squelch(self.application.txFreq):
if self.keep_going:
if not self.ScanScreen(event):
self.MoveVfo(event)
else:
self.ScanScreen(event)
elif not self.scanner:
self.scan_timer.Stop()
```
#### File: Quisk/n2adr/startup.py
```python
"Select the desired hardware, and start Quisk"
import sys, wx, subprocess, os
Choices = [
(' My Transceiver', 'n2adr/quisk_conf.py', ''),
(' VHF/UHF Receiver', 'n2adr/uhfrx_conf.py', ''),
(' Softrock Rx Ensemble', 'softrock/conf_rx_ensemble2.py', 'n2adr/conf2.py'),
(' Softrock Rx/Tx Ensemble', 'softrock/conf_rx_tx_ensemble.py', 'n2adr/conf6.py'),
(' Plain Sound Card, Rx only', 'n2adr/conf2.py', ''),
(' Test microphone sound', 'n2adr/conf4.py', ''),
(' SDR-IQ, receive only, antenna to RF input', 'quisk_conf_sdriq.py', 'n2adr/conf2.py'),
(' AOR AR8600 with IF to my hardware', 'n2adr/quisk_conf_8600.py', ''),
(' AOR AR8600 with IF to SDR-IQ', 'quisk_conf_sdr8600.py', 'n2adr/conf2.py'),
(' Fldigi with my transceiver', 'n2adr/quisk_conf.py', 'n2adr/conf5.py'),
(' Freedv.org Rx with my transceiver', 'n2adr/quisk_conf.py', 'n2adr/conf7.py'),
(' Hermes-Lite', 'hermes/quisk_conf.py', 'n2adr/conf3.py'),
(' Odyssey', 'odyssey/quisk_conf.py', 'n2adr/conf1.py'),
(' My Transceiver to Hermes-Lite', 'Quisk2Hermes', ''),
]
if sys.platform == 'win32':
os.chdir('C:\\pub\\quisk')
exe = "C:\\python27\\pythonw.exe"
else:
os.chdir('/home/jim/pub/quisk')
exe = "/usr/bin/python"
class ListBoxFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1, 'Select Hardware')
font = wx.Font(14, wx.FONTFAMILY_SWISS, wx.NORMAL, wx.FONTWEIGHT_NORMAL)
self.SetFont(font)
charx = self.GetCharWidth()
chary = self.GetCharHeight()
width = 0
height = chary * 2
tlist = []
for txt, conf1, conf2 in Choices:
text = "%s, %s" % (txt, conf1)
if conf2:
text = "%s, %s" % (text, conf2)
tlist.append(text)
w, h = self.GetTextExtent(text)
width = max(width, w)
height += h
width += 3 * chary
lb = wx.ListBox(self, -1, (0, 0), (width, height), tlist, wx.LB_SINGLE)
lb.SetSelection(0)
lb.SetFont(font)
lb.Bind(wx.EVT_LISTBOX_DCLICK, self.OnDClick, lb)
lb.Bind(wx.EVT_KEY_DOWN, self.OnChar)
self.SetClientSize((width, height))
def OnDClick(self, event):
lb = event.GetEventObject()
index = lb.GetSelection()
text, conf1, conf2 = Choices[index]
if conf1 == "Quisk2Hermes":
subprocess.Popen([exe, 'quisk.py', '-c', 'n2adr/quisk_conf.py', '--local', 'Q2H'])
subprocess.Popen([exe, 'quisk.py', '-c', 'hermes/quisk_conf.py', '--config2', 'n2adr/conf3A.py', '--local', 'Q2H'])
else:
cmd = [exe, 'quisk.py', '-c', conf1]
if conf2:
cmd = cmd + ['--config2', conf2]
subprocess.Popen(cmd)
self.Destroy()
def OnChar(self, event):
if event.GetKeyCode() == 13:
self.OnDClick(event)
else:
event.Skip()
class App(wx.App):
def __init__(self):
if sys.stdout.isatty():
wx.App.__init__(self, redirect=False)
else:
wx.App.__init__(self, redirect=True)
def OnInit(self):
frame = ListBoxFrame()
frame.Show()
return True
app = App()
app.MainLoop()
```
#### File: SdrCwXcvr/Quisk/quisk_conf_kx3.py
```python
import sys, os
if sys.platform == "win32":
name_of_sound_capt = 'Primary'
name_of_sound_play = 'Primary'
latency_millisecs = 150
data_poll_usec = 20000
else:
name_of_sound_capt = 'hw:0'
name_of_sound_play = 'hw:0'
latency_millisecs = 150
data_poll_usec = 5000
# Use the hamlib hardware module to talk to the KX3
from quisk_hardware_hamlib import Hardware as BaseHardware
class Hardware(BaseHardware):
def __init__(self, app, conf):
BaseHardware.__init__(self, app, conf)
# Change the port and timing parameters here:
# self.hamlib_rigctld_port = 4532 # Standard rigctld control port
# self.hamlib_poll_seconds = 0.2 # Time interval to poll for changes
def open(self):
ret = BaseHardware.open(self)
if not self.hamlib_connected: # rigctld is not started. Try to start it.
os.system("rigctld -m 229 -r /dev/ttyUSB0 -s 4800 & ") # Check the baud rate menu setting
# If this fails, start rigctld by hand.
return ret
``` |
{
"source": "jmhaussaire/Bimaru",
"score": 3
} |
#### File: jmhaussaire/Bimaru/bimaru.py
```python
import numpy as np
import argparse
import sys
# This is my grid for the bimaru
# _ = blank
# w = water
# O = circle (single boat)
# X = square (in the middle of a boat)
# P = Top of boat
# C = left side of boat
# U = bottom of boat
# D = right side of boat
# T = boat, but undetermined yet (could be edge, could be middle ...)
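# Example row (illustrative): w,C,X,D,w,O,w encodes a horizontal three-cell boat
# (left edge, middle, right edge) followed by a single-cell boat, separated by water.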
VERBOSE = 0
N_COL = 0
N_LINE = 0
###########################
## Could / should probably define a cell class with these methods
## Attributes:
## character
## changeability
## Define an order like X > T
###########################
## Function checks if a cell is part of a boat
def is_boat(grid,i,j):
return (is_boat_car(grid[i,j]))
def is_boat_car(cell):
return (cell in ["O","X","P","C","U","D","T"])
## Function checks if a cell is a valid cell (water, boat or empty)
def is_valid(target):
return (target in ["w","_","O","X","P","C","U","D","T"])
## Function checks if a cell is a valid cell for a new grid (cant be T)
def is_valid_beginning(target):
return (target in ["w","_","O","X","P","C","U","D"])
## Function checks if a cell is changeable (only _ and T are changeable).
def is_changeable(grid,i,j):
return grid[i,j] in ["_","T"]
## Function checks if a cell is empty
def is_empty(grid,i,j):
return (grid[i,j]=="_")
##############################
##############################
###########################
## Could / should probably define a grid class with these methods
## Attributes:
## the grid
## the boats in line /col
## Methods:
## Print grid with boats on the side
###########################
## Print the grid with the boats on the side
def print_grid(grid,boat_in_col,boat_in_line,final=False):
if final:
dim = (N_LINE-1, N_COL-1)
total_grid = np.zeros(dim,dtype=str)
for i in range(dim[0]-1):
for j in range(dim[1]-1):
total_grid[i,j]=grid[i+1,j+1]
total_grid[:,-1] = boat_in_line[1:-1]+["-"]
total_grid[-1,:] = boat_in_col[1:-1]+["-"]
else:
dim = (N_LINE+1, N_COL+1)
total_grid = np.zeros(dim,dtype=str)
for i in range(dim[0]-1):
for j in range(dim[1]-1):
total_grid[i,j]=grid[i,j]
total_grid[:,-1] = boat_in_line+["-"]
total_grid[-1,:] = boat_in_col+["-"]
print(total_grid)
return total_grid
## Check if a grid is valid :
## - right amount of boats per line/col
## - no touching boats
## - good amount of boats
def check_valid_grid(grid,boat_in_col,boat_in_line,boat_types):
## Checks if the amount of boats is right
for i in range(N_LINE):
if count_boat(grid[i,:])>boat_in_line[i]:
return False
if count_boat(grid[i,:])+count_empty(grid[i,:])<boat_in_line[i]:
return False
for j in range(N_COL):
if count_boat(grid[:,j])>boat_in_col[j]:
return False
if count_boat(grid[:,j]) + count_empty(grid[:,j]) <boat_in_col[j]:
return False
## Checks if the boat are not touching
for i in range(N_LINE):
for j in range(N_COL):
if is_boat(grid,i,j):
to_check = [(i+1,j+1) , (i-1,j+1) , (i+1,j-1) , (i-1,j-1)]
if grid[i,j]=="O":
to_check = to_check+[(i+1,j) , (i,j-1) , (i-1,j) , (i,j+1)]
if grid[i,j] =="P":
to_check = to_check+[ (i,j-1) , (i-1,j) , (i,j+1)]
if grid[i,j]=="D":
to_check= to_check+[(i+1,j) , (i-1,j) , (i,j+1)]
if grid[i,j]=="U":
to_check= to_check+[(i+1,j) , (i,j-1) , (i,j+1)]
if grid[i,j]=="C":
to_check= to_check+[(i+1,j) , (i,j-1) , (i-1,j) ]
for (x,y) in to_check:
if (grid[x,y]!="w" and grid[x,y]!="_") :
return False
## Checks if I have the good amount of boats
try:
check_remaining(grid,boat_types)
except ValueError:
return False
return True
##########################
##########################
## count the number of boats in a line/col
def count_boat(line):
return [is_boat_car(line[i]) for i in range(len(line))].count(True)
## count the number of empty spaces in a line/col
def count_empty(line):
return [line[i]=="_" for i in range(len(line))].count(True)
## Function provides the list of surrounding cells:
def surroundings(i,j):
return [(i+1,j+1) , (i-1,j+1) , (i+1,j-1) , (i-1,j-1) , (i+1,j) , (i,j-1) , (i-1,j) , (i,j+1)]
## Function provides the four corners around the cell:
def corners(i,j):
return [(i+1,j+1) , (i-1,j+1) , (i+1,j-1) , (i-1,j-1)]
## Function provides the list of cells up-left-down-right of a cell:
def cross(i,j):
return [(i+1,j) , (i,j-1) , (i-1,j) , (i,j+1)]
## Function returns the opposite cell of a given cell:
def opposite(i,j,x,y):
return (i+i-x , j+j-y)
## Function returns the perpendicular to a opposite:
def cross_rest(i,j,x,y):
if i==x:
return [(i+1,j), (i-1,j)]
else:
return [(i,j+1), (i,j-1)]
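## Illustrative examples (0-based (row, col) indices):
## opposite(3,3, 3,4) -> (3,2), the cell mirrored through (3,3) from (3,4);
## cross_rest(3,3, 3,4) -> [(4,3), (2,3)], the two cells perpendicular to that axis.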
## Function checks if a cell is surrounded by things already
def is_surrounded(grid,i,j):
answer = True
for (x,y) in surroundings(i,j):
if not is_empty(grid,x,y):
answer = False
return answer
## Function checks if a T can be determined into O P B C D or X
def check_T(grid,i,j):
for (x,y) in cross(i,j):
if is_boat(grid,x,y):
(a,b) = opposite(i,j,x,y)
if grid[a,b]=="_":
return
if is_boat(grid,a,b):
change_cell(grid,i,j,"X")
return
if grid[a,b]=="w":
target=""
if a>i:
target = "U"
if a<i:
target = "P"
if b>j:
target = "D"
if b<j:
target = "C"
change_cell(grid,i,j,target)
return
if np.all([grid[x,y]=="w" for (x,y) in cross(i,j)]):
change_cell(grid,i,j,"O")
return
## Function changes the content of the cell i,j to the given target
def change_cell(grid,i,j,target):
if is_valid(target):
if is_changeable(grid,i,j):
grid[i,j]=target
else:
if grid[i,j]!=target:
if not (target=="T" and is_boat(grid,i,j)):
if VERBOSE>2:
print("OK, now we have a problem")
raise ValueError
else:
if VERBOSE>2:
print("What are you having me do ?")
raise ValueError
return
## Function that changes all surrounding cells if there's a boat
def found_boat(grid,i,j):
if not is_boat(grid,i,j):
if VERBOSE>2:
print("hmm, I made a mistake here")
raise ValueError
return
to_change = [(i+1,j+1) , (i-1,j+1) , (i+1,j-1) , (i-1,j-1)]
if grid[i,j]=="O":
to_change = to_change+[(i+1,j) , (i,j-1) , (i-1,j) , (i,j+1)]
if grid[i,j] =="P":
to_change = to_change+[ (i,j-1) , (i-1,j) , (i,j+1)]
change_cell(grid,i+1,j,"T")
if grid[i,j]=="D":
to_change = to_change+[(i+1,j) , (i-1,j) , (i,j+1)]
change_cell(grid,i,j-1,"T")
if grid[i,j]=="U":
to_change = to_change+[(i+1,j) , (i,j-1) , (i,j+1)]
change_cell(grid,i-1,j,"T")
if grid[i,j]=="C":
to_change = to_change+[(i+1,j) , (i,j-1) , (i-1,j) ]
change_cell(grid,i,j+1,"T")
if grid[i,j]=="T":
check_T(grid,i,j)
if grid[i,j]=="X":
for (x,y) in cross(i,j):
if is_boat(grid,x,y):
change_cell(grid,opposite(i,j,x,y)[0],opposite(i,j,x,y)[1],"T")
if grid[x,y]=="w":
change_cell(grid,opposite(i,j,x,y)[0],opposite(i,j,x,y)[1],"w")
for (a,b) in cross_rest(i,j,x,y):
change_cell(grid,a,b,"T")
for (x,y) in to_change:
change_cell(grid,x,y,"w")
return
## Function checks if a line has all its boats or just enough holes to fill it
def check_line(grid,n_boat,i):
boat_count=count_boat(grid[i,:])
empty_count=count_empty(grid[i,:])
if boat_count==n_boat:
for j in range(N_COL):
if is_empty(grid,i,j):
change_cell(grid,i,j,"w")
if (empty_count == (n_boat-boat_count)):
for j in range(N_COL):  # iterate over the columns of row i
if is_empty(grid,i,j):
change_cell(grid,i,j,"T")
def check_col(grid,n_boat,j):
boat_count=count_boat(grid[:,j])
empty_count=count_empty(grid[:,j])
if boat_count==n_boat:
for i in range(N_LINE):
if is_empty(grid,i,j):
change_cell(grid,i,j,"w")
if (empty_count == (n_boat-boat_count)):
for i in range(N_LINE):
if is_empty(grid,i,j):
change_cell(grid,i,j,"T")
## Function check the amount of remaining boats to put in the grid
def check_remaining(grid,boat_types):
remaining_boats=boat_types.copy()
try:
for i in range(N_LINE):
boat_size=0
on_a_boat=False
for j in range(N_COL):
if on_a_boat:
boat_size+=1
if not is_boat(grid,i,j):
on_a_boat=False
boat_size=0
if grid[i,j]=="C":
boat_size+=1
on_a_boat=True
if grid[i,j]=="D":
if on_a_boat:
remaining_boats.remove(boat_size)
boat_size=0
if grid[i,j]=="O":
remaining_boats.remove(1)
for j in range(N_COL):
boat_size=0
on_a_boat=False
for i in range(N_LINE):
if on_a_boat:
boat_size+=1
if not is_boat(grid,i,j):
on_a_boat=False
boat_size=0
if grid[i,j]=="P":
boat_size+=1
on_a_boat=True
if grid[i,j]=="U":
if on_a_boat:
remaining_boats.remove(boat_size)
boat_size=0
except ValueError:
if VERBOSE>2:
print("I made a mistake in the kind of boats I put")
raise
return remaining_boats
## Function that checks if the boats can be closed, according to the remaining boats.
def check_T_remaining(grid,remaining):
for i in range(N_LINE):
if "T" in grid[i,:]:
on_a_boat=False
boat_size=0
has_T=False
T_indices=(0,0)
for j in range(N_COL):
if grid[i,j]=="T":
if on_a_boat:
boat_size+=1
if boat_size==max(remaining):
change_cell(grid,i,j,"D")
has_T=False
else:
has_T=True
T_indices=(i,j)
if is_boat(grid,i,j):
on_a_boat=True
boat_size+=1
if grid[i,j]=="D" and has_T and boat_size==max(remaining):
change_cell(grid,T_indices[0],T_indices[1],"C")
has_T=False
else:
on_a_boat=False
boat_size=0
has_T=False
for j in range(N_COL):
if "T" in grid[:,j]:
on_a_boat=False
boat_size=0
has_T=False
T_indices=(0,0)
for i in range(N_LINE):
if grid[i,j]=="T":
if on_a_boat:
boat_size+=1
if boat_size==max(remaining):
change_cell(grid,i,j,"U")
has_T=False
else:
has_T=True
T_indices=(i,j)
if is_boat(grid,i,j):
on_a_boat=True
boat_size+=1
if grid[i,j]=="U" and has_T and boat_size==max(remaining):
change_cell(grid,T_indices[0],T_indices[1],"P")
has_T=False
else:
on_a_boat=False
boat_size=0
has_T=False
## Function that looks where to put the biggest boats. Puts them in if possible
def look_for_space(grid,remaining_boats,boat_in_col,boat_in_line,value=0):
size=0
if value==0:
size = max(remaining_boats)
else:
size = min(value,max(remaining_boats))
## First check for the remaining boats in line and col
remain_boat_in_line=[]
for i in range(N_LINE):
n_boat=0
on_a_boat=False
boat_size=0
full_boat=True
for j in range(N_COL):
if is_boat(grid,i,j):
on_a_boat=True
boat_size+=1
if grid[i,j-1]=="_":
full_boat=False
if grid[i,j]=="w" and on_a_boat:
on_a_boat=False
n_boat += (boat_size*full_boat)
boat_size=0
full_boat=True
if grid[i,j]=="_" and on_a_boat:
on_a_boat=False
boat_size=0
full_boat=True
remain_boat_in_line.append(boat_in_line[i]-n_boat)
remain_boat_in_col=[]
for j in range(N_COL):
n_boat=0
on_a_boat=False
boat_size=0
full_boat=True
for i in range(N_LINE):
if is_boat(grid,i,j):
on_a_boat=True
boat_size+=1
if grid[i-1,j]=="_":
full_boat=False
if grid[i,j]=="w" and on_a_boat:
on_a_boat=False
n_boat += (boat_size*full_boat)
boat_size=0
full_boat=True
if grid[i,j]=="_" and on_a_boat:
on_a_boat=False
boat_size=0
full_boat=True
remain_boat_in_col.append(boat_in_col[j]-n_boat)
## Now look for space in the rows and columns that have big enough boat_in
n_spaces=0
space_indices=[]
for i in range(N_LINE):
if remain_boat_in_line[i]<size:
continue
n_empty=0
origin=(0,0)
boat_size=0
for j in range(N_COL):
if is_empty(grid,i,j):
n_empty+=1
if origin==(0,0):
origin=(i,j)
elif is_boat(grid,i,j):
boat_size+=1
if origin==(0,0):
origin=(i,j)
elif grid[i,j]=="w":
if n_empty>0:
n_empty+=boat_size
if n_empty>=size:
n_spaces+=1
space_indices.append((origin,(i,j-1)))
n_empty=0
origin=(0,0)
boat_size=0
for j in range(N_COL):
if remain_boat_in_col[j]<size:
continue
n_empty=0
origin=(0,0)
boat_size=0
for i in range(N_LINE):
if is_empty(grid,i,j):
n_empty+=1
if origin==(0,0):
origin=(i,j)
elif is_boat(grid,i,j):
boat_size+=1
if origin==(0,0):
origin=(i,j)
elif grid[i,j]=="w":
if n_empty>0:
n_empty+=boat_size
if n_empty>=size:
n_spaces+=1
space_indices.append((origin,(i-1,j)))
n_empty=0
origin=(0,0)
boat_size=0
## Check that there wasn't a mistake:
## _,_,_,w,_T,D,3 would give me 2 possibilities
## In fact, I don't need to check this.
## With the possibility to add a boat at random,
## I will simply add a boat in that hole
## and notice I have too many in the row, which excludes this possibility.
## _,_,_,_,_,_,_,6 would give me only 1 possibility for a 3 space
for ((x1,y1),(x2,y2)) in space_indices[:]:
n_boat=0
if x1==x2:
n_boat = count_boat(grid[x1,:]) - count_boat(grid[x1,y1:y2+1])
if n_boat+size > boat_in_line[x1]:
space_indices.remove(((x1,y1),(x2,y2)))
n_possible_boat = int(np.floor((y2-y1+2)/(size+1)))
for i in range(2,n_possible_boat+1):
if i*size + n_boat <= boat_in_line[x1]:
space_indices.append(((x1,y1),(x2,y2)))
else: #y1==y2
n_boat = count_boat(grid[:,y1]) - count_boat(grid[x1:x2+1:,y1])
if n_boat+size > boat_in_col[y1]:
space_indices.remove(((x1,y1),(x2,y2)))
n_possible_boat = int(np.floor((x2-x1+2)/(size+1)))
for i in range(2,n_possible_boat+1):
if i*size + n_boat <= boat_in_col[y1] :
space_indices.append(((x1,y1),(x2,y2)))
if VERBOSE>2:
print("spaces for boat ",size," after : ",space_indices)
if len(space_indices)<[i>size-1 for i in remaining_boats].count(True):
if VERBOSE>2:
print("that's odd")
print_grid(grid,boat_in_col,boat_in_line)
raise ValueError
if len(space_indices) == [i>size-1 for i in remaining_boats].count(True):
for space in space_indices:
fit_boat_in_space(grid,size,space)
return space_indices
## Function fits a boat in a given space
def fit_boat_in_space(grid,boat_size,space):
space_size = max(space[1][0]-space[0][0] , space[1][1] - space[0][1])+1
if space_size<boat_size:
if VERBOSE>2:
print("houston, we have a problem")
raise ValueError
step=0
for i in range(space[0][0],space[1][0]+1):
for j in range(space[0][1], space[1][1]+1):
if not (step<space_size-boat_size or step>=boat_size):
change_cell(grid,i,j,"T")
step+=1
return
## Checks if the game is done:
def is_done(grid):
return not ("_" in grid)
## The different iterations to be done
def iterate(grid,boat_in_col,boat_in_line,boat_types,iterations=0):
while True:
if VERBOSE>1:
print("iter : ",iterations)
if VERBOSE>3:
print_grid(grid,boat_in_col,boat_in_line)
iterations+=1
old_grid = grid.copy()
### Step 1 : fill water according to boats
###########
for i in range(1,N_LINE):
for j in range(1,N_COL):
if (is_boat(grid,i,j)):
found_boat(grid,i,j)
if np.any(old_grid!=grid):
continue
if is_done(grid):
return grid
### Step 2 : check lines and columns for the number of boats
###########
for i in range(len(boat_in_col)):
check_col(grid,boat_in_col[i],i)
for i in range(len(boat_in_line)):
check_line(grid,boat_in_line[i],i)
if np.any(old_grid!=grid):
continue
if is_done(grid):
return grid
### Step 3 : Check if some boats can be closed off
###########
remaining_boats=check_remaining(grid,boat_types)
check_T_remaining(grid,remaining_boats)
if np.any(old_grid!=grid):
continue
if is_done(grid):
return grid
### Step 4 : Check for the big boats
###########
remaining_boats=check_remaining(grid,boat_types)
if VERBOSE>2:
print("remaining_boats = ",remaining_boats)
if len(remaining_boats)>0:
to_continue=True
for value in sorted(set(remaining_boats),reverse=True):
if value>1 and to_continue:
look_for_space(grid,remaining_boats,boat_in_col,boat_in_line,value)
if np.any(old_grid!=grid):
to_continue=False
if np.any(old_grid!=grid):
continue
else:
break
return grid
## Add a random cell and checks the consequences it has
def add_random(grid,boat_in_col,boat_in_line,boat_types,target):
if target not in ["w","T"]:
print("i think there has been a confusion with the parameter")
raise ValueError
to_fill = np.where(grid=="_")
safe_grid=grid.copy()
for (i,j) in list(zip(to_fill[0],to_fill[1])):
if VERBOSE>3:
print_grid(grid,boat_in_col,boat_in_line)
if VERBOSE>2:
print("Trying to change cell : ",i,j," with a ",target)
#input("waiting")
change_cell(grid,i,j,target)
try:
iterate(grid,boat_in_col,boat_in_line,boat_types)
except:
if VERBOSE>2:
print("there has been a mistake so I guess this test failed")
if VERBOSE>2:
print("after trying, is grid valid : ", check_valid_grid(grid,boat_in_col,boat_in_line,boat_types))
if VERBOSE>3:
print_grid(grid,boat_in_col,boat_in_line)
if check_valid_grid(grid,boat_in_col,boat_in_line,boat_types) and is_done(grid):
return grid
else:
if not check_valid_grid(grid,boat_in_col,boat_in_line,boat_types):
if target=="w":
new_target="T"
else: #target =="T"
new_target="w"
change_cell(safe_grid,i,j,new_target)
grid=safe_grid.copy()
break
grid=safe_grid.copy()
return grid
## Add a random boat and checks the consequences it has
def add_random_boat(grid,remaining_boats,boat_in_col,boat_in_line,boat_types):
if VERBOSE>2:
print("remaining_boats = ",remaining_boats)
safe_grid=grid.copy()
if len(remaining_boats)>0:
for value in sorted(set(remaining_boats),reverse=True):
if value>1:
spaces = look_for_space(grid,remaining_boats,boat_in_col,boat_in_line,value)
for ((x1,y1,),(x2,y2)) in spaces:
new_spaces=[]
if (x1==x2):
for i in range(y1,y2-value+2):
new_spaces.append(((x1,i),(x2,i+value-1)))  # append a single (start, end) pair
else: #y1==y2
for i in range(x1,x2-value+2):
new_spaces.append(((i,y1),(i+value-1,y2)))  # append a single (start, end) pair
for new_space in new_spaces:
try:
fit_boat_in_space(grid,value,new_space)  # modifies grid in place; returns None
iterate(grid,boat_in_col,boat_in_line,boat_types)
except:
if VERBOSE>2:
print("there has been a mistake so I guess this test failed")
if VERBOSE>2:
print("after trying, is grid valid : ", check_valid_grid(grid,boat_in_col,boat_in_line,boat_types))
if VERBOSE>3:
print_grid(grid,boat_in_col,boat_in_line)
if check_valid_grid(grid,boat_in_col,boat_in_line,boat_types) and is_done(grid):
return grid
else:
grid=safe_grid.copy()
return safe_grid
## The different iterations to be done
## This time, it includes trying things at random
def iterate_random(grid,boat_in_col,boat_in_line,boat_types,iterations=0):
while True:
grid= iterate(grid,boat_in_col,boat_in_line,boat_types)
old_grid = grid.copy()
### Step 1 : Add a random boat_point somewhere
###########
grid = add_random(grid,boat_in_col,boat_in_line,boat_types,"T")
if np.any(old_grid!=grid):
continue
if is_done(grid):
return grid
### Step 2 : Add a random water somewhere
###########
grid = add_random(grid,boat_in_col,boat_in_line,boat_types,"w")
if np.any(old_grid!=grid):
continue
if is_done(grid):
return grid
### Step 3 : Add a boat at random
###########
remaining_boats=check_remaining(grid,boat_types)
check_T_remaining(grid,remaining_boats)
remaining_boats=check_remaining(grid,boat_types)
grid = add_random_boat(grid,remaining_boats,boat_in_col,boat_in_line,boat_types)
return grid
## Load a grid from file and check its consistency
def load_grid(path_to_grid):
grid_list = []
boat_in_line = []
boat_in_col = []
water_line=[]
n_line = 0
n_col = 0
with open(path_to_grid) as grid_in:
for l in grid_in:
if VERBOSE>1:
print(l)
n_line+=1
line_arr = l.strip().split(sep=',')
# First line, get the number of columns and add a line full of water
if n_col==0:
n_col = len(line_arr)
if n_line ==1:
for i in range(n_col+1):
water_line.append("w")
grid_list.append(water_line)
boat_in_line.append(0)
if len(line_arr)!= n_col:
print("Error in the size of the grid : inconsistent amount of columns")
sys.exit(0)
if n_line!= n_col:
for c in line_arr[:-1]:
if not is_valid_beginning(c):
print("Error in the grid : invalid character ",c)
sys.exit(0)
grid_list.append(["w"]+line_arr[:-1]+["w"])
boat_in_line.append(int(line_arr[-1]))
else:
grid_list.append(water_line)
boat_in_line.append(0)
boat_in_col = [0]+[int(i) for i in line_arr[:-1]]+[0]
if n_line!=n_col:
print("Error in the size of the grid : grid not square")
sys.exit(0)
### Last check for the grid
total_boats = sum(boat_types)
boats_line = sum(boat_in_line)
boats_col = sum(boat_in_col)
if boats_line != boats_col:
print("Check your initial grid, you made a mistake",boats_line,boats_col)
raise ValueError
if total_boats != boats_col:
print("Check your initial grid, you made a mistake",total_boats,boats_col)
raise ValueError
grid = np.array(grid_list)
return grid,boat_in_col,boat_in_line
###################################
########## main ###############
###################################
#if __name__ = main
if __name__ == "__main__":
text_grid = u"""
The grid should be square and follow the following standards :
_ = blank ;
w = Water ;
O = Circle (single boat) ;
X = Square (in the middle of a boat) ;
P = Top of boat ;
C = Left side of boat ;
U = Bottom of boat ;
D = Right side of boat ;
each cell is separated by a comma (,)
Example of a grid:
_,_,_,_,_,_,_,_,_,5
_,_,_,_,_,_,_,_,_,0
_,_,_,_,_,_,w,_,_,2
_,_,_,_,_,_,_,_,_,3
_,_,_,_,_,_,C,_,_,3
_,_,_,_,U,_,_,_,_,2
_,_,X,_,_,_,_,_,_,1
_,_,_,_,_,_,_,_,_,4
_,_,_,_,_,_,_,_,_,0
2,1,3,0,6,0,3,3,2,
"""
text_size = """
The size argument determines the amount of boats in the grid.
It is supposed that there are :
1 boat of maximum size
2 boats of size max-1
3 boats of size max-2
n boats of size max+1-n
"""
text_output = """Path to output the grid.
Default = [path]_answer.[ext]"""
parser = argparse.ArgumentParser(
description="This program solves a bimaru.",
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('path', type=str,
help='Path to the grid' + text_grid)
parser.add_argument('size', type=int,
help='Size of the biggest boat.'+text_size)
parser.add_argument('--output', type=str,
help=text_output)
parser.add_argument('--verbose', type=int,default=0,
help='verbose level.')
args = parser.parse_args()
path_to_grid = args.path
max_boat_size = args.size
ofile=args.output
VERBOSE = args.verbose
if ofile==None:
if len(path_to_grid.split("."))>2:
print("don't put dots in your paths, you psycho!")
sys.exit(1)
else:
ofile = path_to_grid.split(".")[0]+"_answer."+path_to_grid.split(".")[1]
## Define the list of boats from size
boat_types=[]
for i in range(max_boat_size,0,-1):
boat_types = boat_types+list(range(i,0,-1))
boat_types = sorted(boat_types,reverse=True)
## Get the grid
grid,boat_in_col,boat_in_line = load_grid(path_to_grid)
N_LINE = grid.shape[0]
N_COL = grid.shape[1]
## The main work
grid = iterate_random(grid,boat_in_col,boat_in_line,boat_types)
## Over
########
if check_valid_grid(grid,boat_in_col,boat_in_line,boat_types) and is_done(grid):
print("wouhou, Im done !")
print("here is the answer:")
output = print_grid(grid,boat_in_col,boat_in_line,final=True)
np.savetxt(ofile,output,fmt="%s",delimiter=",")
else:
print_grid(grid,boat_in_col,boat_in_line)
print("Im stuck, I need help")
``` |
{
"source": "jmhaussaire/SOAI-Project",
"score": 3
} |
#### File: jmhaussaire/SOAI-Project/common.py
```python
import numpy as np
import cv2
import tensorflow as tf
from keras.backend import set_session
from uuid import uuid4
from random import randint
red = (255,0,0)
black = (0,0,0)
def show_LOIs(frame, LOIs):
for LOI in LOIs:
cv2.line(frame, (LOI[0][0],LOI[0][1]),(LOI[1][0],LOI[1][1]), red, 3)
def show_LOI_Info(frame, object_class, ovwrClass, lane, time):
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
cv2.rectangle(frame, (10, 10), (700, 170), (180, 132, 109), -1)
cv2.putText(
frame,
'Overwriting Vehicle Class',
(11, 40),font,1.5,black,1,font)
cv2.putText(
frame,
'Lane: ' + str(lane),
(11, 80),font,1,black,1,font)
cv2.putText(
frame,
'Overwrite Vehicle Type: %s with %s' %(object_class, ovwrClass),
(11, 110),font,1,black,1,font)
cv2.putText(
frame,
'Timestamp: ' + time,
(11, 140),font,1,black,1,font)
def random_color():
return (randint(0, 255), randint(0, 255), randint(0, 255))
def limit_gpu_memory(fraction):
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = fraction
set_session(tf.Session(config=config))
class Detection:
def __init__(self, tlwh, frame_no, object_class=None, confidence=None, predicted=False):
self.tlwh = np.array(tlwh).astype(float)
self.frame_no = frame_no
self.object_class = object_class
self.confidence = confidence
self.predicted = predicted
self.id = uuid4()
def to_tlbr(self):
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
def to_xyah(self):
ret = self.tlwh.copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
def to_bottom(self):
ret = self.tlwh.copy()
ret[2:] += ret[:2]
if ret[2] != ret[0]:
ret[0] += (ret[2] - ret[0]) / 2
ret[1] = ret[3]
return ret
def show_center(self, frame, color, width=2):
xyah = self.to_xyah()
cv2.circle(frame, (int(xyah[0]), int(xyah[1])), 3, color, width)
def show(self, frame, color=(0, 0, 0), res=None, lane=9, ovwrClass=None, width=2):
tlbr = self.to_tlbr()
if ovwrClass is not None:
cv2.rectangle(frame, (int(tlbr[0]), int(tlbr[1])), (int(
tlbr[2]), int(tlbr[3])), color, width * 4)
cv2.putText(frame, ovwrClass, (int(tlbr[0]), int(
tlbr[1])), cv2.FONT_HERSHEY_DUPLEX, 1, black, 1, 2)
elif res is not None:
cv2.rectangle(frame, (int(tlbr[0]), int(tlbr[1])), (int(
tlbr[2]), int(tlbr[3])), color, width * 2)
cv2.putText(frame, res, (int(tlbr[0]), int(
tlbr[1])), cv2.FONT_HERSHEY_DUPLEX, 1, black, 1, 2)
else:
cv2.rectangle(frame, (int(tlbr[0]), int(tlbr[1])), (int(
tlbr[2]), int(tlbr[3])), color, width, 4)
cv2.putText(frame, "%s (%.1f)" % (self.object_class, self.confidence), (int(
tlbr[0]), int(tlbr[3])), cv2.FONT_HERSHEY_COMPLEX, 0.5, black, 1, 2)
if lane != 9:
cv2.putText(frame, str(lane), (int(tlbr[2]), int(
tlbr[1])), cv2.FONT_HERSHEY_DUPLEX, 1, black, 1, 2)
def intersection(self, x):
a = self.to_tlbr()
b = x.to_tlbr()
x_left = max(a[0], b[0])
y_top = max(a[1], b[1])
x_right = min(a[2], b[2])
y_bottom = min(a[3], b[3])
if x_right < x_left or y_bottom < y_top:
return 0.0
return (x_right - x_left) * (y_bottom - y_top)
def iou(self, b):
intersection_area = self.intersection(b)
a_area = self.tlwh[2] * self.tlwh[3]
b_area = b.tlwh[2] * b.tlwh[3]
return intersection_area / float(a_area + b_area - intersection_area)
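# Worked example (illustrative): two 10x10 boxes whose top-left corners are 5 px apart
# horizontally overlap in a 5x10 = 50 px area, so iou = 50 / (100 + 100 - 50) = 1/3.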
def is_inside(self, x):
a, b = self.to_tlbr(), x.to_tlbr()
return a[0] >= b[0] and a[1] >= b[1] and a[2] <= b[2] and a[3] <= b[3]
def is_center_inside(self, x):
a, b = self.to_xyah(), x.to_tlbr()
return a[0] >= b[0] and a[1] >= b[1] and a[0] <= b[2] and a[1] <= b[3]
def get_ious(self, detections):
return [self.iou(x) for x in detections]
def get_max_iou(self, detections):
ious = self.get_ious(detections)
i = np.argmax(ious)
return i, ious[i]
@staticmethod
def from_frame(shape, padding=0):
tlwh = (padding, padding, shape[0] -
2 * padding, shape[1] - 2 * padding)
return Detection(tlwh, 0)
```
#### File: jmhaussaire/SOAI-Project/IOUTracker.py
```python
import numpy as np
from uuid import uuid4
from common import random_color
from collections import Counter
from shapely.geometry import LineString
import operator
import math
class TrackState:
Tentative = 1
Confirmed = 2
Deleted = 3
Finished = 4
class lane_range:
def __init__(self, lx, ly):
self.lx = lx
self.ly = ly
self.slope = (ly[0] - ly[1]) / (lx[1] - lx[0])
def yValue(self, x):
return self.ly[0] + (-1 * self.slope * x)
class Track:
def __init__(self, detection, max_age, n_init, sigma_h, color=None):
self.color = color if color is not None else random_color()
self.detections = [detection]
self.ious = []
self.id = uuid4()
self._max_age = max_age
self._n_init = n_init
self.hits = 1
self.age = 1
self.time_since_update = 0
self.state = TrackState.Tentative
self.sigma_h = sigma_h
self.likelyClass = []
self.res = None
self.ovwr = False
self.ovwrClass = None
self.gotLanePoint = False
self.lane = 0
self.l1 = lane_range([0, 1400], [610, 490])
self.l2 = lane_range([0, 1400], [710, 520])
self.counted = False
self.direction = None
self.intersection = None
def lane_detector(self):
if self.is_confirmed():
bottom = self.detections[-1].to_bottom()
if int(bottom[1]) > self.l2.yValue(int(bottom[0])):
self.lane = 0
elif int(bottom[1]) > self.l1.yValue(int(bottom[0])):
self.lane = 1
else:
self.lane = 2
def csv_detector(self, LOIs):
lineDetections = []
if self.counted is False and self.is_confirmed() and self.res != "car" and self.lane != 2:
for count_line in LOIs:
detected, vector = self.intersection_detection(count_line)
lineDetections.append(detected)
return lineDetections
def bearing_calc(self, Ax, Ay, Bx, By):
TWOPI = 6.2831853071795865
RAD2DEG = 57.2957795130823209
theta = math.atan2((Bx - Ax), (Ay - By))
if theta < 0.0:
theta += TWOPI
return (RAD2DEG * theta)
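# Worked example (illustrative, image coordinates where y grows downward):
# moving from (100, 200) to (100, 100) gives atan2(0, 100) = 0, i.e. 0 degrees ("up");
# moving from (100, 100) to (200, 100) gives 90 degrees ("right").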
def line_intersection(self, line_a, line_b):
line1 = LineString(line_a)
line2 = LineString(line_b)
return line1.intersects(line2)
def intersection_detection(self, detectionLine):
trackLenght = len(self.detections)
bottom0 = self.detections[0].to_bottom()
bottom1 = self.detections[-1].to_bottom()
trackVector = LineString(
[(bottom0[0], bottom0[1]), (bottom1[0], bottom1[1])])
if self.line_intersection(detectionLine, trackVector):
self.counted = True
self.vehicle_direction()
return True, trackVector
else:
return False, None
def bounding_box_coord(self, value=-1, place="bottom"):
if place == "bottom":
return [int(self.detections[value].to_tlbr()[0] + (self.detections[value].to_tlbr()[2] - self.detections[value].to_tlbr()[0]) / 2), int(self.detections[value].to_tlbr()[3])]
elif place == "middle":
return [int(self.detections[value].to_xyah()[0]), int(self.detections[value].to_xyah()[1])]
def vehicle_direction(self):
directionNum = 0
if self.is_confirmed():
bottom0 = self.detections[0].to_bottom()
bottom1 = self.detections[-1].to_bottom()
directionNum = self.bearing_calc(
bottom0[0], bottom0[1], bottom1[0], bottom1[1])
if 315 < directionNum or directionNum < 45:
self.direction = "up"
elif 45 < directionNum < 135:
self.direction = "right"
elif 135 < directionNum < 225:
self.direction = "down"
elif 225 < directionNum < 315:
self.direction = "left"
def likely_class(self):
self.likelyClass.append(
[self.detections[-1].object_class, self.detections[-1].confidence])
d = dict()
for sl in self.likelyClass:
d[sl[0]] = d.get(sl[0], 0) + sl[1]
self.res = max(d.items(), key=operator.itemgetter(1))[0]
def predict(self):
# does nothing because this is a simply IOU tracker with no propagation
self.age += 1
self.time_since_update += 1
def update(self, detection):
self.ious.append(detection.iou(self.detections[-1]))
self.detections.append(detection)
self.hits += 1
self.time_since_update = 0
if self.state == TrackState.Tentative and self.hits >= self._n_init:
self.state = TrackState.Confirmed
def mark_missed(self):
if self.state == TrackState.Tentative:
self.state = TrackState.Deleted
elif max(self.ious) > self.sigma_h:
self.state = TrackState.Finished
else:
self.state = TrackState.Deleted
def get_ious(self, detections):
return [self.detections[-1].iou(x) for x in detections]
def show_history(self, frame, width=2, n=30):
# objectList = []
if self.is_confirmed():
self.detections[-1].show(frame, self.color,
self.res, self.lane, self.ovwrClass, width=width)
for b in self.detections[-n:]:
b.show_center(frame, self.color, width=-1)
def is_tentative(self):
return self.state == TrackState.Tentative
def is_confirmed(self):
return self.state == TrackState.Confirmed
def is_deleted(self):
return self.state == TrackState.Deleted
def is_finished(self):
return self.state == TrackState.Finished
class IOUTracker:
def __init__(self, sigma_iou_discard=0.05, sigma_iou=0.4, sigma_h=0.7, max_age=2, n_init=3):
self.sigma_iou_discard = sigma_iou_discard
self.sigma_iou = sigma_iou # minimal to be consider as overlapping
self.sigma_h = sigma_h
self.max_age = max_age
self.n_init = n_init
self.finished_tracks = []
self.active_tracks = []
def predict(self):
for track in self.active_tracks:
track.predict()
def update(self, detections, frame_bbox):
for track in self.active_tracks:
ious = np.array(track.get_ious(detections))
if len(ious) == 0:
track.mark_missed()
else:
i = np.argmax(ious)
if ious[i] >= self.sigma_iou:
track.update(detections[i])
detections.remove(detections[i])
else:
track.mark_missed()
for detection in detections:
ious = detection.get_ious([track.detections[-1]
for track in self.active_tracks])
# skip those that sufficiently overlap with existing active tracks
if not np.any(np.array(ious) > self.sigma_iou_discard) and detection.is_inside(frame_bbox):
self.active_tracks.append(
Track(detection, self.max_age, self.n_init, self.sigma_h))
tracks_finished = [
track for track in self.active_tracks if track.is_finished()]
tracks_deleted = [
track for track in self.active_tracks if track.is_deleted()]
self.finished_tracks += tracks_finished
for track in tracks_finished + tracks_deleted:
self.active_tracks.remove(track)
```
#### File: jmhaussaire/SOAI-Project/vid_process.py
```python
import csv
import os
import pandas as pd
import cv2
from detectionprovider import DetectionProvider
from IOUTracker import IOUTracker
from video import VideoStreamReader, VideoStreamWriter
from tqdm import tqdm_notebook
from pascal_voc_writer import Writer as LabelWriter
from common import Detection
def process_vid(in_vid_name,seconds_count,seconds_skip,
yolo,
out_folder,csv_out,out_vid_name):
"""
Look at every frame of the video and categorise the vehicles.
Output an xml file listing the tracks, and a picture with the box.
output a csv file with the list of timestamp and vehicles
"""
vidreader = VideoStreamReader(
in_vid_name, seconds_count=seconds_count, seconds_skip=seconds_skip, width=1920, height=1080)
vidwriter = VideoStreamWriter(
out_vid_name, width=vidreader.width, height=vidreader.height, fps=vidreader.fps)
detection_provider = DetectionProvider(yolo)
# define the detection zone??
padding = 0
frame_bbox = Detection.from_frame(
(vidreader.width, vidreader.height), int(padding))
tracker = IOUTracker()
csv_writer = True
ovwrInfo = ('-','-','-','-')
# initialize .csv
with open(csv_out, 'w+') as f:
writer = csv.writer(f)
csv_line = 'timestamp,vehicle,direction,id'
writer.writerows([csv_line.split(',')])
# nice progress bar:
pbar = tqdm_notebook(total=vidreader.frame_count - vidreader.frame_skip)
# main loop
while True:
frame = vidreader.next_frame()
if frame is None:
break
imgName = "img_%s" % (vidreader.frame_no)
XMLPathName = os.path.join(out_folder, imgName)
imgPathName = os.path.join(out_folder, imgName)
labelwriter = LabelWriter(XMLPathName, vidreader.width, vidreader.height)
#yolo draws bounding boxes (bb) right into the image.
#Since I want to be able to save images without bb, I have to make a copy:
frame_copy = frame.copy()
captureFrame = True
captureCSV = True
pbar.update()
#yolo detection:
detections = detection_provider.detect_boxes(frame, vidreader.frame_no)
tracker.update(detections.copy(), frame_bbox)
for track in tracker.active_tracks:
track.likely_class()
# I don't know yet the lane or the LOI
# track.lane_detector()
# lineDetections = track.csv_detector(LOIs)
track.show_history(frame)
vTime = vidreader.capture.get(cv2.CAP_PROP_POS_MSEC) / 1000
nowTime = pd.Timedelta(seconds=vTime)
if captureCSV:
if track.ovwrClass is not None:
csv_line = [str(nowTime),
str(track.ovwrClass),str(track.direction),str(track.id)]
elif track.res is not None:
csv_line = [str(nowTime),
str(track.res),str(track.direction),str(track.id)]
else:
print("Huh?")
with open(csv_out, 'a') as f:
writer = csv.writer(f)
#(timestamp, vehicle, direction) = csv_line
writer.writerows([csv_line])
#print("writing to csv")
captureCSV = False
if captureFrame:
dbox = track.detections[-1].to_tlbr()
bx1 = dbox[0]
by1 = dbox[1]
bx2 = dbox[2]
by2 = dbox[3]
if track.res is None:
print("track.res is None?!", track.res)
#track.res = track.detections[-1].object_class
if track.ovwrClass is not None:
labelwriter.addObject(track.ovwrClass, bx1, by1, bx2, by2, pose = track.id)
elif track.res is not None:
labelwriter.addObject(track.res, bx1, by1, bx2, by2, pose = track.id)
else:
labelwriter.addObject("unknown", bx1, by1, bx2, by2, pose = track.id)
labelwriter.save(XMLPathName + ".xml")
#print("write xml")
vidwriter.write(frame)
#save image every 4 frames if non-car is detected
#or has been detected max. 13 frames before:
if captureFrame:
# without bounding box
# cv2.imwrite(imgPathName + ".jpg",
# cv2.cvtColor(frame_copy, cv2.COLOR_RGB2BGR))
# with bounding box
cv2.imwrite(imgPathName + ".jpg",
cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
#print("save image")
pbar.close()
vidreader.release()
vidwriter.release()
print("job done")
``` |
{
"source": "jmhayes3/pmaw",
"score": 2
} |
#### File: pmaw/pmaw/auth.py
```python
from requests.auth import AuthBase
class Authenticator(AuthBase):
def __init__(self, access_token):
self.access_token = access_token
def __call__(self, r):
r.headers['x-messari-api-key'] = self.access_token
return r
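# Usage sketch (illustrative, names assumed): pass an instance as the `auth` argument
# of a requests call and the key is attached as the x-messari-api-key header, e.g.
#   requests.get(url, auth=Authenticator(api_key))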
```
#### File: pmaw/pmaw/listing.py
```python
from copy import deepcopy
from .models.base import PMAWBase
from .exceptions import NotFound
class ListingGenerator(PMAWBase):
def __init__(self, messari, path, limit=500, params=None):
super().__init__(messari, _data=None)
self.exhausted = False
self.batch = None
self.batch_index = None
self.yielded = 0
self.path = path
self.limit = limit
self.params = {}
if params:
self.params = deepcopy(params)
self.params["limit"] = limit or 500
self.params["page"] = 1
def __iter__(self):
return self
def __next__(self):
if self.limit is not None and self.yielded >= self.limit:
raise StopIteration
if self.batch is None or self.batch_index >= len(self.batch):
if self.exhausted:
raise StopIteration
try:
batch = self._messari.request(
"GET",
self.path,
self.params
).json()["data"]
batch = self._messari.parser.parse(batch)
if batch and isinstance(batch, list):
self.batch = batch
else:
raise StopIteration
except NotFound:
raise StopIteration
if len(self.batch) < self.params["limit"]:
self.exhausted = True
self.batch_index = 0
self.params["page"] += 1
self.batch_index += 1
self.yielded += 1
return self.batch[self.batch_index - 1]
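# Pagination sketch: each fetch requests `limit` items for the current `page`, parses
# them with the client's parser, and bumps `page`; iteration ends when a short batch,
# a NotFound, or the overall `limit` of yielded items is reached. Illustrative usage
# (the path is an assumption): for item in ListingGenerator(messari, "assets"): ...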
```
#### File: pmaw/models/base.py
```python
from copy import deepcopy
class PMAWBase:
def __init__(self, messari, _data=None):
self._messari = messari
if _data:
for attribute, value in _data.items():
setattr(self, attribute, value)
@staticmethod
def _safely_add_arguments(argument_dict, key, **new_arguments):
value = deepcopy(argument_dict[key]) if key in argument_dict else {}
value.update(new_arguments)
argument_dict[key] = value
@staticmethod
def _safely_add_argument(argument_dict, key, new_key, new_value):
value = deepcopy(argument_dict[key]) if key in argument_dict else {}
value.update({new_key: new_value})
argument_dict[key] = value
@classmethod
def from_data(cls, messari, data):
return cls(messari, _data=data)
class MessariBase(PMAWBase):
def __init__(self, messari, _data=None, _fetched=False):
super().__init__(messari, _data=_data)
self._fetched = _fetched
def __getattr__(self, attribute):
if not attribute.startswith("_") and not self._fetched:
self._fetch()
return getattr(self, attribute)
raise AttributeError
def _fetch(self):
self._fetched = True
def _reset_attributes(self, *attributes):
for attribute in attributes:
if attribute in self.__dict__:
del self.__dict__[attribute]
self._fetched = False
```
#### File: pmaw/models/market.py
```python
from .base import PMAWBase
from ..endpoints import API_PATH
from ..timeseries import TimeseriesGenerator
class Market(PMAWBase):
"""Market data."""
def __init__(self, messari, id=None, _data=None):
if (id, _data).count(None) != 1:
raise TypeError("Either `id` or `_data` required.")
if id:
self.id = id
super().__init__(messari, _data=_data)
def timeseries(self, metric, start, end, interval="1d", limit=None):
path = API_PATH["market_metric_time_series"].format(
market=self.id,
metric=metric
)
params = dict(start=start, end=end, interval=interval)
return TimeseriesGenerator(self._messari, path, params, limit)
```
#### File: pmaw/pmaw/session.py
```python
from copy import deepcopy
from urllib.parse import urljoin
from requests import codes
from .exceptions import (
ResponseException,
BadRequest,
Unauthorized,
Forbidden,
NotFound,
TooManyRequests,
)
from .const import API_PREFIX
from .rate_limiter import RateLimiter
from .request_handler import RequestHandler
class Session:
def __init__(self, request_handler=None, api_prefix=None):
self.request_handler = request_handler or RequestHandler()
self.api_prefix = api_prefix or API_PREFIX
self.rate_limiter = RateLimiter()
def _close(self):
self.request_handler.close()
def __enter__(self):
return self
def __exit__(self, *_args):
self._close()
def _request(self, method, url, params):
print(method, url, params)
response = self.rate_limiter.call(
self.request_handler.request,
method,
url,
params
)
if response.status_code == codes.ok:
return response
elif response.status_code == codes.bad_request:
raise BadRequest(response)
elif response.status_code == codes.unauthorized:
raise Unauthorized(response)
elif response.status_code == codes.forbidden:
raise Forbidden(response)
elif response.status_code == codes.not_found:
raise NotFound(response)
elif response.status_code == codes.too_many_requests:
raise TooManyRequests(response)
else:
raise ResponseException(response)
def request(self, method, path, params=None):
params = deepcopy(params) or {}
url = urljoin(self.api_prefix, path)
return self._request(method, url, params)
``` |
{
"source": "jmhayesesq/Open-Chem",
"score": 3
} |
#### File: openchem/data/feature_data_layer.py
```python
import numpy as np
from torch.utils.data import Dataset
from openchem.data.utils import process_smiles
from openchem.data.utils import read_smiles_property_file
from openchem.data.utils import get_tokens, augment_smiles
class FeatureDataset(Dataset):
"""
Creates dataset for feature-property data.
Args:
filename (str): string with full path to dataset file. Dataset file
must be csv file.
cols_to_read (list): list specifying columns to read from dataset file.
Could be of various length; `cols_to_read[0]` will be used as the index
for the column with SMILES, and `cols_to_read[1:]` will be used as
indices for label values.
delimiter (str): columns delimiter in `filename`. Default is `,`.
get_features (callable): python function to extract features from input data
get_features_args (dict): additional parameters for the get_features function
"""
def __init__(self,
filename,
cols_to_read,
get_features,
delimiter=',',
return_smiles=False,
get_features_args=None):
super(FeatureDataset, self).__init__()
self.return_smiles = return_smiles
self.get_features = get_features
data = read_smiles_property_file(filename, cols_to_read, delimiter)
if len(cols_to_read) > 1:
assert len(cols_to_read) == len(data)
smiles = data[0]
target = np.array(data[1:], dtype='float')
target = target.T
num_targets = len(cols_to_read) - 1
target = target.reshape((-1, num_targets))
else:
smiles = data[0]
target = None
self.target = target
features, valid_idx, invalid_idx = get_features(smiles, **(get_features_args or {}))  # guard against the None default
self.objects = [smiles[i] for i in valid_idx]
length = [len(sm) for sm in self.objects]
self.max_len = max(length)
self.data = features
def __len__(self):
return len(self.data)
def __getitem__(self, index):
sample = {}
if self.return_smiles:
object = self.objects[index]
object = object + " " * (self.max_len - len(object) + 1)
sample['object'] = np.array([ord(c) for c in object])
sample['features'] = self.data[index]
if self.target is not None:
sample['labels'] = self.target[index]
return sample
```
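A sketch of feeding `FeatureDataset` to a PyTorch `DataLoader`, assuming a CSV file with SMILES in column 0 and a numeric label in column 1; the file name and the toy featurizer are placeholders, not part of OpenChem.
```python
import numpy as np
from torch.utils.data import DataLoader

def dummy_features(smiles, length=8):
    """Toy featurizer returning (features, valid_idx, invalid_idx) as the dataset expects."""
    feats = np.random.rand(len(smiles), length).astype(np.float32)
    return feats, list(range(len(smiles))), []

dataset = FeatureDataset("train.csv",               # hypothetical file
                         cols_to_read=[0, 1],
                         get_features=dummy_features,
                         get_features_args={"length": 8})
loader = DataLoader(dataset, batch_size=32, shuffle=True)
for batch in loader:
    print(batch["features"].shape, batch["labels"].shape)
    break
```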
#### File: openchem/data/siamese_data_layer.py
```python
import numpy as np
import pickle
from torch.utils.data import Dataset
from openchem.data.utils import read_smiles_property_file
from openchem.data.utils import sanitize_smiles, pad_sequences, seq2tensor
from openchem.data.utils import get_tokens
from openchem.data.smiles_data_layer import SmilesDataset
from openchem.data.graph_data_layer import GraphDataset
class SiameseDataset(Dataset):
def __init__(self, filename, head1_type, head2_type, cols_to_read,
head1_arguments, head2_arguments):
super(SiameseDataset, self).__init__()
assert len(cols_to_read) == 3
if head1_type == "smiles":
cols_to_read = [cols_to_read[0]] + [cols_to_read[2]]
head1_dataset = SmilesDataset(filename,
cols_to_read=[0, 2],
**head1_arguments)
elif head1_type == "graphs":
raise NotImplementedError()
head1_dataset = GraphDataset(filename=filename,
cols_to_read=[0, 2],
**head1_arguments)
else:
raise ValueError("Unsupported head1_type: " + str(head1_type))
if head2_type == "smiles":
head2_dataset = SmilesDataset(filename,
cols_to_read=[1, 2],
**head2_arguments)
elif head2_type == "graphs":
raise NotImplementedError()
head2_dataset = GraphDataset(filename=filename,
cols_to_read=[1, 2],
**head2_arguments)
else:
raise ValueError("Unsupported head2_type: " + str(head2_type))
self.head1_dataset = head1_dataset
self.head2_dataset = head2_dataset
#assert len(head1_dataset) == len(head2_dataset)
self.target = head1_dataset.target
def __len__(self):
return len(self.target)
def __getitem__(self, index):
head1_sample = self.head1_dataset[index]
head2_sample = self.head2_dataset[index]
sample = {'head1': head1_sample,
'head2': head2_sample,
'labels': self.target[index]}
return sample
```
#### File: openchem/models/Graph2Label.py
```python
from openchem.models.openchem_model import OpenChemModel
import torch
class Graph2Label(OpenChemModel):
r"""
Creates a model that predicts one or multiple labels given object of
class graph as input. Consists of 'graph convolution neural network
encoder'__, followed by 'graph max pooling layer'__ and
multilayer perceptron.
__https://arxiv.org/abs/1609.02907
__https://pubs.acs.org/doi/full/10.1021/acscentsci.6b00367
Args:
params (dict): dictionary of parameters describing the model
architecture.
"""
def __init__(self, params):
super(Graph2Label, self).__init__(params)
self.encoder = self.params['encoder']
self.encoder_params = self.params['encoder_params']
self.Encoder = self.encoder(self.encoder_params, self.use_cuda)
self.mlp = self.params['mlp']
self.mlp_params = self.params['mlp_params']
self.MLP = self.mlp(self.mlp_params)
def forward(self, inp, eval=False):
if eval:
self.eval()
else:
self.train()
output = self.Encoder(inp)
output = self.MLP(output)
return output
@staticmethod
def cast_inputs(sample, task, use_cuda, for_prediction=False):
batch_adj = sample['adj_matrix'].to(torch.float)
batch_x = sample['node_feature_matrix'].to(torch.float)
if for_prediction and "object" in sample.keys():
batch_object = sample["object"]
else:
batch_object = None
if not for_prediction and 'labels' in sample.keys():
batch_labels = sample['labels']
if task == 'classification':
batch_labels = batch_labels.to(torch.long)
else:
batch_labels = batch_labels.to(torch.float)
else:
batch_labels = None
if use_cuda:
batch_x = batch_x.to(device='cuda')
batch_adj = batch_adj.to(device='cuda')
if batch_labels is not None:
batch_labels = batch_labels.to(device='cuda')
batch_inp = (batch_x, batch_adj)
if batch_object is not None:
return batch_inp, batch_object
else:
return batch_inp, batch_labels
```
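Graph2Label is assembled entirely from the `params` dictionary it receives. A rough sketch of that dictionary is below; the encoder import and the sizes are assumptions for illustration, and a working config must also supply the common `OpenChemModel` options before `Graph2Label(model_params)` will construct.
```python
import torch.nn.functional as F
from openchem.modules.mlp.openchem_mlp import OpenChemMLP
# Assumed encoder class/path based on this repository's layout; substitute the
# graph encoder actually used in your configuration.
from openchem.modules.encoders.gcn_encoder import GraphCNNEncoder

model_params = {
    'encoder': GraphCNNEncoder,
    'encoder_params': {'input_size': 75, 'encoder_dim': 128},   # illustrative sizes
    'mlp': OpenChemMLP,
    'mlp_params': {
        'input_size': 128,
        'n_layers': 2,
        'hidden_size': [64, 1],
        'activation': [F.relu, lambda x: x],
    },
    # ...plus the common OpenChemModel options (task, use_cuda, batch_size, ...)
}
```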
#### File: openchem/models/vanilla_model.py
```python
from __future__ import print_function
from __future__ import division
import numpy as np
from sklearn.ensemble import RandomForestRegressor as RFR
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.svm import SVC
from sklearn.svm import SVR
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn releases
from sklearn import metrics
from data import get_fp, get_desc, normalize_desc, cross_validation_split
from mordred import Calculator, descriptors
class RandomForestQSAR(object):
def __init__(self, model_type='classifier', feature_type='fingerprints', n_estimators=100, n_ensemble=5):
super(RandomForestQSAR, self).__init__()
self.n_estimators = n_estimators
self.n_ensemble = n_ensemble
self.model = []
self.model_type = model_type
if self.model_type == 'classifier':
for i in range(n_ensemble):
self.model.append(RFC(n_estimators=n_estimators))
elif self.model_type == 'regressor':
for i in range(n_ensemble):
self.model.append(RFR(n_estimators=n_estimators))
else:
raise ValueError('invalid value for argument')
self.feature_type = feature_type
if self.feature_type == 'descriptors':
self.calc = Calculator(descriptors, ignore_3D=True)
self.desc_mean = [0] * self.n_ensemble
def load_model(self, path):
self.model = []
for i in range(self.n_ensemble):
m = joblib.load(path + str(i) + '.pkl')
self.model.append(m)
if self.feature_type == 'descriptors':
arr = np.load(path + 'desc_mean.npy')
self.desc_mean = arr
def save_model(self, path):
assert self.n_ensemble == len(self.model)
for i in range(self.n_ensemble):
joblib.dump(self.model[i], path + str(i) + '.pkl')
if self.feature_type == 'descriptors':
np.save(path + 'desc_mean.npy', self.desc_mean)
def fit_model(self, data):
eval_metrics = []
if self.feature_type == 'fingerprints':
fps = get_fp(data.smiles)
elif self.feature_type == 'descriptors':
fps, _, _ = get_desc(data.smiles, self.calc)
if self.model_type == 'classifier':
cross_val_data, cross_val_labels = \
cross_validation_split(fps, data.binary_labels)
elif self.model_type == 'regressor':
cross_val_data, cross_val_labels = \
cross_validation_split(fps, data.property)
for i in range(self.n_ensemble):
train_sm = np.concatenate(cross_val_data[:i] + cross_val_data[(i + 1):])
test_sm = cross_val_data[i]
train_labels = np.concatenate(cross_val_labels[:i] + cross_val_labels[(i + 1):])
test_labels = cross_val_labels[i]
if self.feature_type == 'descriptors':
train_sm, desc_mean = normalize_desc(train_sm)
self.desc_mean[i] = desc_mean
test_sm, _ = normalize_desc(test_sm, desc_mean)
self.model[i].fit(train_sm, train_labels.ravel())
predicted = self.model[i].predict(test_sm)
if self.model_type == 'classifier':
fpr, tpr, thresholds = metrics.roc_curve(test_labels, predicted)
eval_metrics.append(metrics.auc(fpr, tpr))
metrics_type = 'AUC'
elif self.model_type == 'regressor':
r2 = metrics.r2_score(test_labels, predicted)
eval_metrics.append(r2)
metrics_type = 'R^2 score'
return eval_metrics, metrics_type
def predict(self, smiles, average=True):
if self.feature_type == 'fingerprints':
fps = get_fp(smiles)
assert len(smiles) == len(fps)
clean_smiles = []
clean_fps = []
nan_smiles = []
for i in range(len(fps)):
if np.isnan(sum(fps[i])):
nan_smiles.append(smiles[i])
else:
clean_smiles.append(smiles[i])
clean_fps.append(fps[i])
clean_fps = np.array(clean_fps)
elif self.feature_type == 'descriptors':
clean_fps, clean_smiles, nan_smiles = get_desc(smiles, self.calc)
prediction = []
if len(clean_fps) > 0:
for i in range(self.n_ensemble):
m = self.model[i]
if self.feature_type == 'descriptors':
clean_fps, _ = normalize_desc(clean_fps, self.desc_mean[i])
prediction.append(m.predict(clean_fps))
prediction = np.array(prediction)
if average:
prediction = prediction.mean(axis=0)
assert len(clean_smiles) == len(prediction)
return clean_smiles, prediction, nan_smiles
class SVMQSAR(object):
def __init__(self, model_type='classifier', n_ensemble=5):
super(SVMQSAR, self).__init__()
self.n_ensemble = n_ensemble
self.model = []
self.model_type = model_type
if self.model_type == 'classifier':
for i in range(n_ensemble):
self.model.append(SVC())
elif self.model_type == 'regressor':
for i in range(n_ensemble):
self.model.append(SVR())
else:
raise ValueError('invalid value for argument')
def load_model(self, path):
self.model = []
for i in range(self.n_ensemble):
m = joblib.load(path + str(i) + '.pkl')
self.model.append(m)
def save_model(self, path):
assert self.n_ensemble == len(self.model)
for i in range(self.n_ensemble):
joblib.dump(self.model[i], path + str(i) + '.pkl')
def fit_model(self, data, cross_val_data, cross_val_labels):
eval_metrics = []
for i in range(self.n_ensemble):
train_sm = np.concatenate(cross_val_data[:i] + cross_val_data[(i + 1):])
test_sm = cross_val_data[i]
train_labels = np.concatenate(cross_val_labels[:i] + cross_val_labels[(i + 1):])
test_labels = cross_val_labels[i]
fp_train = get_fp(train_sm)
fp_test = get_fp(test_sm)
self.model[i].fit(fp_train, train_labels.ravel())
predicted = self.model[i].predict(fp_test)
if self.model_type == 'classifier':
fpr, tpr, thresholds = metrics.roc_curve(test_labels, predicted)
eval_metrics.append(metrics.auc(fpr, tpr))
metrics_type = 'AUC'
elif self.model_type == 'regressor':
r2 = metrics.r2_score(test_labels, predicted)
eval_metrics.append(r2)
metrics_type = 'R^2 score'
return eval_metrics, metrics_type
def predict(self, smiles, average=True):
fps = get_fp(smiles)
assert len(smiles) == len(fps)
clean_smiles = []
clean_fps = []
nan_smiles = []
for i in range(len(fps)):
if np.isnan(sum(fps[i])):
nan_smiles.append(smiles[i])
else:
clean_smiles.append(smiles[i])
clean_fps.append(fps[i])
clean_fps = np.array(clean_fps)
prediction = []
if len(clean_fps) > 0:
for m in self.model:
prediction.append(m.predict(clean_fps))
prediction = np.array(prediction)
if average:
prediction = prediction.mean(axis=0)
assert len(clean_smiles) == len(prediction)
return clean_smiles, prediction, nan_smiles
```
#### File: modules/embeddings/onehot_embedding.py
```python
import torch
from openchem.modules.embeddings.openchem_embedding import OpenChemEmbedding
class OneHotEmbedding(OpenChemEmbedding):
def __init__(self, params):
super(OneHotEmbedding, self).__init__(params)
if self.padding_idx is not None:
weight = torch.eye(self.num_embeddings - 1)
zero_row = torch.zeros(self.num_embeddings - 1).unsqueeze(0)
weight = torch.cat([weight[:self.padding_idx], zero_row, weight[self.padding_idx:]], dim=0)
else:
weight = torch.eye(self.num_embeddings)
self.weight = torch.nn.Parameter(weight, requires_grad=False)
def forward(self, inp):
embedded = self.weight[inp]
return embedded
```
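The embedding above is a frozen lookup into an identity-like matrix with an all-zero row inserted at `padding_idx`. A standalone sketch of that construction (mirroring the code above, outside the class) makes the lookup easy to inspect; the vocabulary size is illustrative.
```python
import torch

num_embeddings, padding_idx = 5, 0
weight = torch.eye(num_embeddings - 1)
zero_row = torch.zeros(num_embeddings - 1).unsqueeze(0)
weight = torch.cat([weight[:padding_idx], zero_row, weight[padding_idx:]], dim=0)

tokens = torch.tensor([[0, 2, 4]])
print(weight[tokens])          # padding token 0 -> all-zero row; others -> one-hot rows
print(weight[tokens].shape)    # torch.Size([1, 3, 4])
```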
#### File: modules/encoders/rnn_encoder.py
```python
import warnings
import torch
import numpy as np
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from openchem.modules.encoders.openchem_encoder import OpenChemEncoder
from openchem.utils.utils import check_params
class RNNEncoder(OpenChemEncoder):
def __init__(self, params, use_cuda):
super(RNNEncoder, self).__init__(params, use_cuda)
check_params(params, self.get_required_params(), self.get_optional_params())
self.layer = self.params['layer']
layers = ['LSTM', 'GRU', 'RNN']
if self.layer not in ['LSTM', 'GRU', 'RNN']:
raise ValueError(self.layer + ' is invalid value for argument'
' \'layer\'. Choose one from :' + ', '.join(layers))
self.input_size = self.params['input_size']
self.encoder_dim = self.params['encoder_dim']
self.n_layers = self.params['n_layers']
if self.n_layers > 1:
self.dropout = self.params['dropout']
else:
warnings.warn('dropout can be non-zero only when n_layers > 1. Parameter dropout set to 0.')
self.dropout = 0
self.bidirectional = self.params['is_bidirectional']
if self.bidirectional:
self.n_directions = 2
else:
self.n_directions = 1
if self.layer == 'LSTM':
self.rnn = nn.LSTM(self.input_size,
self.encoder_dim,
self.n_layers,
bidirectional=self.bidirectional,
dropout=self.dropout,
batch_first=True)
elif self.layer == 'GRU':
self.rnn = nn.GRU(self.input_size,
self.encoder_dim,
self.n_layers,
bidirectional=self.bidirectional,
dropout=self.dropout,
batch_first=True)
else:
self.rnn = nn.RNN(self.input_size,
self.encoder_dim,
self.n_layers,
bidirectional=self.bidirectional,
dropout=self.dropout,
batch_first=True)
@staticmethod
def get_required_params():
return {
'input_size': int,
'encoder_dim': int,
}
@staticmethod
def get_optional_params():
return {'layer': str, 'n_layers': int, 'dropout': float, 'is_bidirectional': bool}
def forward(self, inp, previous_hidden=None, pack=True):
"""
inp: shape batch_size, seq_len, input_size
previous_hidden: if given shape n_layers * num_directions,
batch_size, embedding_dim.
Initialized automatically if None
return: embedded
"""
input_tensor = inp[0]
input_length = inp[1]
batch_size = input_tensor.size(0)
# TODO: warning: output shape is changed! (batch_first=True) Check hidden
if pack:
input_lengths_sorted, perm_idx = torch.sort(input_length, dim=0, descending=True)
input_lengths_sorted = input_lengths_sorted.detach().to(device="cpu").tolist()
input_tensor = torch.index_select(input_tensor, 0, perm_idx)
rnn_input = pack_padded_sequence(input=input_tensor,
lengths=input_lengths_sorted,
batch_first=True)
else:
rnn_input = input_tensor
if previous_hidden is None:
previous_hidden = self.init_hidden(batch_size)
if self.layer == 'LSTM':
cell = self.init_cell(batch_size)
previous_hidden = (previous_hidden, cell)
else:
if self.layer == 'LSTM':
hidden = previous_hidden[0]
cell = previous_hidden[1]
hidden = torch.index_select(hidden, 1, perm_idx)
cell = torch.index_select(cell, 1, perm_idx)
previous_hidden = (hidden, cell)
else:
previous_hidden = torch.index_select(previous_hidden, 1, perm_idx)
rnn_output, next_hidden = self.rnn(rnn_input) # , previous_hidden)
if pack:
rnn_output, _ = pad_packed_sequence(rnn_output, batch_first=True)
_, unperm_idx = perm_idx.sort(0)
rnn_output = torch.index_select(rnn_output, 0, unperm_idx)
if self.layer == 'LSTM':
hidden = next_hidden[0]
cell = next_hidden[1]
hidden = torch.index_select(hidden, 1, unperm_idx)
cell = torch.index_select(cell, 1, unperm_idx)
next_hidden = (hidden, cell)
else:
next_hidden = torch.index_select(next_hidden, 1, unperm_idx)
index_t = (input_length - 1).to(dtype=torch.long)
index_t = index_t.view(-1, 1, 1).expand(-1, 1, rnn_output.size(2))
embedded = torch.gather(rnn_output, dim=1, index=index_t).squeeze(1)
return embedded, next_hidden
def init_hidden(self, batch_size):
if self.use_cuda:
device = torch.device("cuda")
else:
device = torch.device("cpu")
return torch.zeros(self.n_layers * self.n_directions, batch_size, self.encoder_dim, device=device)
def init_cell(self, batch_size):
if self.use_cuda:
device = torch.device("cuda")
else:
device = torch.device("cpu")
return torch.zeros(self.n_layers * self.n_directions, batch_size, self.encoder_dim, device=device)
```
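The forward pass above sorts sequences by length, packs them, restores the original batch order, and then gathers the hidden state at each sequence's last valid timestep. A standalone sketch of that pattern with a plain `nn.GRU` (sizes are illustrative) shows the same mechanics without the OpenChem wrapper:
```python
import torch
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

rnn = nn.GRU(input_size=32, hidden_size=64, batch_first=True)
batch = torch.randn(4, 10, 32)                 # (batch, max_seq_len, input_size)
lengths = torch.tensor([10, 7, 5, 3])

sorted_lengths, perm = torch.sort(lengths, descending=True)
packed = pack_padded_sequence(batch[perm], sorted_lengths.tolist(), batch_first=True)
out, _ = rnn(packed)
out, _ = pad_packed_sequence(out, batch_first=True)
_, unperm = perm.sort(0)
out = out[unperm]                              # restore the original batch order

idx = (lengths - 1).view(-1, 1, 1).expand(-1, 1, out.size(2))
embedded = out.gather(1, idx).squeeze(1)       # last valid hidden state per sequence
print(embedded.shape)                          # torch.Size([4, 64])
```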
#### File: modules/mlp/openchem_mlp.py
```python
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
from openchem.utils.utils import check_params
class OpenChemMLP(nn.Module):
"""Base class for MLP module"""
def __init__(self, params):
super(OpenChemMLP, self).__init__()
check_params(params, self.get_required_params(), self.get_optional_params())
self.params = params
self.hidden_size = self.params['hidden_size']
self.input_size = [self.params['input_size']] + self.hidden_size[:-1]
self.n_layers = self.params['n_layers']
self.activation = self.params['activation']
if type(self.activation) is list:
assert len(self.activation) == self.n_layers
else:
self.activation = [self.activation] * self.n_layers
if 'dropout' in self.params.keys():
self.dropout = self.params['dropout']
else:
self.dropout = 0
self.layers = nn.ModuleList([])
self.bn = nn.ModuleList([])
self.dropouts = nn.ModuleList([])
for i in range(self.n_layers - 1):
self.dropouts.append(nn.Dropout(self.dropout))
self.bn.append(nn.BatchNorm1d(self.hidden_size[i]))
self.layers.append(nn.Linear(in_features=self.input_size[i], out_features=self.hidden_size[i]))
i = self.n_layers - 1
self.dropouts.append(nn.Dropout(self.dropout))
self.layers.append(nn.Linear(in_features=self.input_size[i], out_features=self.hidden_size[i]))
@staticmethod
def get_required_params():
return {
'input_size': int,
'n_layers': int,
'hidden_size': list,
'activation': None,
}
@staticmethod
def get_optional_params():
return {'dropout': float}
def forward(self, inp):
output = inp
for i in range(self.n_layers - 1):
output = self.dropouts[i](output)
output = self.layers[i](output)
output = self.bn[i](output)
output = self.activation[i](output)
output = self.dropouts[-1](output)
output = self.layers[-1](output)
output = self.activation[-1](output)
return output
class OpenChemMLPSimple(nn.Module):
"""Base class for MLP module"""
def __init__(self, params):
super(OpenChemMLPSimple, self).__init__()
check_params(params, self.get_required_params(), self.get_optional_params())
self.params = params
self.hidden_size = self.params['hidden_size']
self.input_size = [self.params['input_size']] + self.hidden_size[:-1]
self.n_layers = self.params['n_layers']
self.activation = self.params['activation']
if type(self.activation) is list:
assert len(self.activation) == self.n_layers
else:
self.activation = [self.activation] * self.n_layers
self.layers = nn.ModuleList([])
for i in range(self.n_layers):
self.layers.append(nn.Linear(in_features=self.input_size[i], out_features=self.hidden_size[i]))
if "init" in self.params.keys():
if self.params["init"] == "xavier_uniform":
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
else:
raise NotImplementedError("Only xavier_uniform "
"initialization is "
"supported now in OpenChemMLPSimple")
@staticmethod
def get_required_params():
return {
'input_size': int,
'n_layers': int,
'hidden_size': list,
'activation': None,
}
@staticmethod
def get_optional_params():
return {'init': str}
def forward(self, inp):
output = inp
for i in range(self.n_layers):
output = self.layers[i](output)
output = self.activation[i](output)
return output
```
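A small sketch instantiating `OpenChemMLP` directly; the sizes, activations, and dropout are illustrative values chosen to satisfy the required/optional parameter lists above.
```python
import torch
import torch.nn.functional as F

mlp_params = {
    'input_size': 16,
    'n_layers': 2,
    'hidden_size': [32, 1],
    'activation': [F.relu, torch.sigmoid],
    'dropout': 0.2,
}
mlp = OpenChemMLP(mlp_params)
x = torch.randn(8, 16)            # batch of 8 feature vectors
print(mlp(x).shape)               # torch.Size([8, 1])
```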
#### File: openchem/utils/metrics.py
```python
from rdkit import Chem
from rdkit.Chem import QED
import numpy as np
import networkx as nx
from openchem.utils.sa_score import sascorer
from rdkit.Chem import Descriptors
def reward_penalized_log_p(smiles, return_mean=True):
"""
Reward that consists of log p penalized by SA and # long cycles,
as described in (Kusner et al. 2017). Scores are normalized based on the
statistics of 250k_rndm_zinc_drugs_clean.smi dataset
:param mol: rdkit mol object
:return: float
"""
# normalization constants, statistics from 250k_rndm_zinc_drugs_clean.smi
logP_mean = 2.4570953396190123
logP_std = 1.434324401111988
SA_mean = -3.0525811293166134
SA_std = 0.8335207024513095
cycle_mean = -0.0485696876403053
cycle_std = 0.2860212110245455
mols = [Chem.MolFromSmiles(sm) for sm in smiles]
log_p = np.array([Descriptors.MolLogP(mol) for mol in mols])
SA = -np.array(sa_score(smiles, return_mean=False))
# cycle score
cycle_score = []
for mol in mols:
cycle_list = nx.cycle_basis(nx.Graph(Chem.rdmolops.GetAdjacencyMatrix(mol)))
if len(cycle_list) == 0:
cycle_length = 0
else:
cycle_length = max([len(j) for j in cycle_list])
if cycle_length <= 6:
cycle_length = 0
else:
cycle_length = cycle_length - 6
cycle_score.append(-cycle_length)
cycle_score = np.array(cycle_score)
normalized_log_p = (log_p - logP_mean) / logP_std
normalized_SA = (SA - SA_mean) / SA_std
normalized_cycle = (cycle_score - cycle_mean) / cycle_std
score = list(normalized_log_p + normalized_SA + normalized_cycle)
if return_mean:
return np.mean(score)
else:
return score
def logP_pen(smiles, return_mean=True):
mols = [Chem.MolFromSmiles(s) for s in smiles]
logp_pen = []
for mol in mols:
cycle_list = nx.cycle_basis(nx.Graph(Chem.rdmolops.GetAdjacencyMatrix(mol)))
tmp = sum([len(c) > 6 for c in cycle_list])
logp_pen.append(Descriptors.MolLogP(mol) - sascorer.calculateScore(mol) - tmp)
if return_mean:
return np.mean(logp_pen)
else:
return logp_pen
def logP(smiles, return_mean=True):
mols = [Chem.MolFromSmiles(s) for s in smiles]
clean_idx = [m is not None for m in mols]
clean_idx = list(np.where(clean_idx)[0])
clean_mols = [mols[i] for i in clean_idx]
if len(clean_mols) > 0:
score = [Descriptors.MolLogP(mol) for mol in clean_mols]
else:
score = -10.0
if return_mean:
return np.mean(score)
else:
return score
def qed(smiles, return_mean=True):
mols = [Chem.MolFromSmiles(s) for s in smiles]
clean_idx = [m is not None for m in mols]
clean_idx = list(np.where(clean_idx)[0])
clean_mols = [mols[i] for i in clean_idx]
if len(clean_mols) > 0:
score = [QED.qed(mol) for mol in clean_mols]
else:
score = -1.0
if return_mean:
return np.mean(score)
else:
return score
def sa_score(smiles, return_mean=True):
mols = [Chem.MolFromSmiles(s) for s in smiles]
clean_idx = [m is not None for m in mols]
clean_idx = list(np.where(clean_idx)[0])
clean_mols = [mols[i] for i in clean_idx]
if len(clean_mols) > 0:
score = [sascorer.calculateScore(m) for m in clean_mols]
else:
score = -1.0
if return_mean:
return np.mean(score)
else:
return score
```
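A quick usage sketch for the scoring helpers above, assuming RDKit, networkx, and OpenChem's bundled SA scorer are importable; the SMILES strings are illustrative.
```python
smiles = ["CCO", "c1ccccc1", "CC(=O)Oc1ccccc1C(=O)O"]   # ethanol, benzene, aspirin

print(qed(smiles))                          # mean drug-likeness (QED) score
print(logP(smiles, return_mean=False))      # per-molecule logP values
print(reward_penalized_log_p(smiles))       # normalized logP - SA - long-cycle penalty
```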
#### File: openchem/utils/utils_3d.py
```python
import numpy as np
def distance_matrix(xyzarr):
npart, ncoord = xyzarr.shape
dist_mat = np.zeros([npart, npart])
for i in range(npart):
for j in range(0, i):
rvec = xyzarr[i] - xyzarr[j]
dist_mat[i][j] = dist_mat[j][i] = np.sqrt(np.dot(rvec, rvec))
return dist_mat
def angle(xyzarr, i, j, k):
rij = xyzarr[i] - xyzarr[j]
rkj = xyzarr[k] - xyzarr[j]
cos_theta = np.dot(rij, rkj)
sin_theta = np.linalg.norm(np.cross(rij, rkj))
theta = np.arctan2(sin_theta, cos_theta)
theta = 180.0 * theta / np.pi
return theta
def dihedral(xyzarr, i, j, k, l):
rji = xyzarr[j] - xyzarr[i]
rkj = xyzarr[k] - xyzarr[j]
rlk = xyzarr[l] - xyzarr[k]
v1 = np.cross(rji, rkj)
v1 = v1 / np.linalg.norm(v1)
v2 = np.cross(rlk, rkj)
v2 = v2 / np.linalg.norm(v2)
m1 = np.cross(v1, rkj) / np.linalg.norm(rkj)
x = np.dot(v1, v2)
y = np.dot(m1, v2)
chi = np.arctan2(y, x)
chi = -180.0 - 180.0 * chi / np.pi
if (chi < -180.0):
chi = chi + 360.0
return chi
def calculate_zmat(xyzarr):
distmat = distance_matrix(xyzarr)
npart, ncoord = xyzarr.shape
r_list = []
a_list = []
d_list = []
r_connect = []
a_connect = []
d_connect = []
if npart > 0:
if npart > 1:
r_list.append(distmat[0][1])
r_connect.append(1)
if npart > 2:
r_list.append(distmat[0][2])
a_list.append(angle(xyzarr, 2, 0, 1))
r_connect.append(1)
a_connect.append(2)
if npart > 3:
for i in range(3, npart):
r_list.append(distmat[i-3][i])
a_list.append(angle(xyzarr, i, i-3, i-2))
d_list.append(dihedral(xyzarr, i, i-3, i-2, i-1))
r_connect.append(i-2)
a_connect.append(i-1)
d_connect.append(i)
return r_list, a_list, d_list, r_connect, a_connect, d_connect
def calculate_xyz(radii, angles, dihedrals, r_connect, a_connect, d_connect):
n_atoms = len(radii) + 1
xyz_coord = np.zeros([n_atoms, 3])
if (n_atoms > 1):
xyz_coord[1] = [radii[0], 0.0, 0.0]
if (n_atoms > 2):
i = r_connect[1] - 1
j = a_connect[0] - 1
r = radii[1]
theta = angles[0] * np.pi / 180.0
x = r * np.cos(theta)
y = r * np.sin(theta)
a_i = xyz_coord[i]
b_ij = xyz_coord[j] - xyz_coord[i]
if (b_ij[0] < 0):
x = a_i[0] - x
y = a_i[1] - y
else:
x = a_i[0] + x
y = a_i[1] + y
xyz_coord[2] = [x, y, 0.0]
for n in range(3, n_atoms):
r = radii[n-1]
theta = angles[n-2] * np.pi / 180.0
phi = dihedrals[n-3] * np.pi / 180.0
sinTheta = np.sin(theta)
cosTheta = np.cos(theta)
sinPhi = np.sin(phi)
cosPhi = np.cos(phi)
x = r * cosTheta
y = r * cosPhi * sinTheta
z = r * sinPhi * sinTheta
i = r_connect[n-1] - 1
j = a_connect[n-2] - 1
k = d_connect[n-3] - 1
a = xyz_coord[k]
b = xyz_coord[j]
c = xyz_coord[i]
ab = b - a
bc = c - b
bc = bc / np.linalg.norm(bc)
nv = np.cross(ab, bc)
nv = nv / np.linalg.norm(nv)
ncbc = np.cross(nv, bc)
new_x = c[0] - bc[0] * x + ncbc[0] * y + nv[0] * z
new_y = c[1] - bc[1] * x + ncbc[1] * y + nv[1] * z
new_z = c[2] - bc[2] * x + ncbc[2] * y + nv[2] * z
xyz_coord[n] = [new_x, new_y, new_z]
return xyz_coord
``` |
{
"source": "jmhbnz/gitlab-group-fork",
"score": 3
} |
#### File: jmhbnz/gitlab-group-fork/gitlab_group_fork.py
```python
import sys
import os
import argparse
import logging
import gitlab
from treelib import Tree
class GitLabInfo():
"""Custom Data Type to hold data returned about group or project"""
def __init__(
self, gitlab_id="", name="",
path="", description="", full_path="", new_id=""):
self.gitlab_id = gitlab_id
self.name = name
self.path = path
self.description = description
self.full_path = full_path
self.new_id = new_id
def main():
"""Main Function"""
logging.basicConfig(level=logging.ERROR)
options = parse_cli()
glab = gitlab.Gitlab(options.url, options.token)
src_group_tree = read_src_group(glab, options.srcGroup)
dest_group_tree = create_dest_group(glab, options.destGroup, src_group_tree)
count_of_projects = fork_projects(glab, src_group_tree, dest_group_tree)
print(f"Forked {count_of_projects} Projects into {len(dest_group_tree)} Groups")
def parse_cli():
"""Parse CLI Options and return, fail on no valid token"""
logging.debug('parse_cli: Parsing CLI Arguments')
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--url",
dest="url",
default="https://gitlab.com",
help="base URL of the GitLab instance")
parser.add_argument("-t", "--token",
dest="token",
default='',
help="API token")
parser.add_argument("srcGroup", help="Source namespace to copy")
parser.add_argument("destGroup", help="Destination namespace to create")
options = parser.parse_args()
if options.token == "":
logging.debug('parse_cli: Did not find token in cli options')
if os.getenv("GITLAB_TOKEN") is not None:
logging.debug('parse_cli: Found token in system environment variable GITLAB_TOKEN')
options.token = os.getenv("GITLAB_TOKEN")
else:
logging.error('parse_cli: Did not find token in environment variable, quitting')
print("API Token Required - Not found in options or environment variables")
sys.exit(1)
return options
def add_new_group(gitlab_group_obj: gitlab.Gitlab) -> Tree:
"""Function to add new GitLab Group to Tree Object"""
group_tree = Tree()
group_tree.create_node(
gitlab_group_obj.path,
gitlab_group_obj.id,
data=GitLabInfo(
gitlab_id=gitlab_group_obj.id,
name=gitlab_group_obj.name,
full_path=gitlab_group_obj.full_path,
path=gitlab_group_obj.path,
description=gitlab_group_obj.description))
return group_tree
def read_src_group(glab, src):
"""Read source group tree from gitlab server"""
logging.info("Attemping to read source group '%s/%s'", glab.url, src)
src_group_tree = Tree()
top_level_group = glab.groups.get(src, include_subgroups=True)
src_group_tree = add_new_group(top_level_group) # For root node
def get_sub_groups(parent):
logging.debug('Looking for sub-groups in %s', parent.full_path)
new_top = glab.groups.get(parent.id, include_subgroups=True)
subgroups = new_top.subgroups.list(all_available=True)
for sub in subgroups:
logging.debug('Found sub-group %s', sub.full_path)
src_group_tree.paste(new_top.id, add_new_group(sub))
logging.debug('Added node to tree with name %s and id %s', sub.path, sub.id)
new_parent = glab.groups.get(sub.id, include_subgroups=True)
new_subgroup = new_parent.subgroups.list(all_available=True)
for child in new_subgroup:
logging.debug('Traversing group %s', child.full_path)
src_group_tree.paste(new_parent.id, add_new_group(child))
get_sub_groups(child)
get_sub_groups(top_level_group)
logging.info('Found %s sub-groups', len(src_group_tree)-1)
print("Source Groups[group_id]:")
src_group_tree.show(idhidden=False)
return src_group_tree
def create_dest_group(glab, dest, src_group_tree):
"""Create destination group structure"""
if '/' in dest:
logging.error('SubGroup as destination not supported "%s"', dest)
sys.exit(1)
dest_group_tree = Tree()
logging.info('Attempting to create destination group at %s/%s', glab.url, dest)
try:
top_level_group = glab.groups.create({'name': dest, 'path': dest})
logging.info('Group Created at %s/%s', glab.url, top_level_group.full_path)
dest_group_tree = add_new_group(top_level_group) # For root node
src_group_tree.update_node(src_group_tree.root, data=GitLabInfo(new_id=top_level_group.id))
except gitlab.exceptions.GitlabCreateError as err:
logging.error('Group Cannot be created: %s', err)
sys.exit(1)
except:
logging.debug('An error occurred')
raise
for grp in src_group_tree.expand_tree():
if src_group_tree.level(grp) == 0:
continue
new_parent = src_group_tree.get_node(src_group_tree.parent(grp).identifier).data.new_id
logging.debug('Creating Group "%s" with Path "%s" and Parent ID "%s"',
src_group_tree.get_node(grp).data.name,
src_group_tree.get_node(grp).data.path,
new_parent)
new_group = glab.groups.create(
{'name': src_group_tree.get_node(grp).data.name,
'path': src_group_tree.get_node(grp).data.path,
'parent_id': new_parent,
'description': src_group_tree.get_node(grp).data.description})
src_group_tree.update_node(grp, data=GitLabInfo(new_id=new_group.id))
dest_group_tree.paste(new_parent, add_new_group(new_group))
logging.info('Created %s sub-groups', len(dest_group_tree)-1)
print("Destination Groups[group_id]:")
dest_group_tree.show(idhidden=False)
return dest_group_tree
def fork_projects(glab, src_group_tree, dest_group_tree):
"""Fork the projects in source groups into destination groups"""
count = 0
logging.debug('Attempting to fork projects from %s to %s',
src_group_tree.get_node(src_group_tree.root).tag,
dest_group_tree.get_node(dest_group_tree.root).tag)
for grp in src_group_tree.expand_tree():
gitlab_grp = glab.groups.get(grp)
projects = gitlab_grp.projects.list()
for project in projects:
gitlab_prj = glab.projects.get(project.id)
new_namespace = src_group_tree.get_node(grp).data.new_id
logging.debug('Forking project "%s" into namespace "%s"', gitlab_prj.name, new_namespace)
gitlab_prj.forks.create({'namespace': new_namespace})
print(f"Forked project: {gitlab_prj.name}")
count = count + 1
return count
if __name__ == "__main__":
main()
logging.debug('End of Script')
``` |
{
"source": "jmhelt/zipf",
"score": 3
} |
#### File: zipf/test/bench.py
```python
import argparse
import os
import pathlib
import shlex
import subprocess
def parse_args():
parser = argparse.ArgumentParser(description="Plot distributions")
parser.add_argument("-b", "--builddir", type=pathlib.Path, required=True,
help="path to build directory")
return parser.parse_args()
def main():
args = parse_args()
test_bin_path = os.path.join(args.builddir, "bin", "bench")
test_bin_path = os.path.abspath(test_bin_path)
subprocess.call(shlex.split(test_bin_path))
if __name__ == "__main__":
main()
```
#### File: zipf/test/plot.py
```python
import argparse
import os
import pathlib
import shlex
import shutil
import subprocess
NUM_SAMPLES = 100000
def parse_args():
parser = argparse.ArgumentParser(description="Plot distributions")
parser.add_argument("-b", "--builddir", type=pathlib.Path, required=True,
help="path to build directory")
return parser.parse_args()
def get_out_fname(dir_path, generator, num_elements, skew):
fname = "{}_{}_{}.csv".format(generator, num_elements, skew)
return os.path.join(dir_path, fname)
def get_gp_fname(dir_path, num_elements):
return os.path.join(dir_path, "plot-{}.gp".format(num_elements))
def gen_gp_file(dir_path, generators, skews, num_elementss):
template = """
set terminal png
set logscale x
set xlabel "Log(rank)"
set ylabel "CDF"
set output "cdf-{0}.png"
set title "CDF for {0} Keys"
set key below
plot \\
"""
for num_elements in num_elementss:
contents = template.format(num_elements)
for generator in generators:
for skew in skews:
out_fname = get_out_fname(dir_path, generator,
num_elements, skew)
contents += ' "{}" using 1:2 smooth cumulative title "{} s={}" with linespoint, \\\n' \
.format(out_fname, generator, skew)
contents = contents[0:len(contents)-4]
# print(contents)
fname = get_gp_fname(dir_path, num_elements)
with open(fname, "w") as f:
f.write(contents)
def main():
args = parse_args()
test_bin_path = os.path.join(args.builddir, "bin", "test")
test_bin_path = os.path.abspath(test_bin_path)
test_dir_path = os.path.dirname(os.path.realpath(__file__))
test_out_path = os.path.join(test_dir_path, "out")
shutil.rmtree(test_out_path, ignore_errors=True)
os.makedirs(test_out_path)
num_elementss = [2, 10]
generators = ["ycsb", "rejinv"]
skews = [0.1, 0.5, 0.99, 2.0, 3.0, 4.0]
for num_elements in num_elementss:
for generator in generators:
for skew in skews:
out_fname = get_out_fname(test_out_path, generator,
num_elements, skew)
cmd = "{} -g {} -e {} -s {} -n {}".format(test_bin_path,
generator, num_elements,
skew, NUM_SAMPLES)
print("{} > {}".format(cmd, out_fname))
with open(out_fname, "w") as f:
subprocess.call(shlex.split(cmd), stdout=f)
gen_gp_file(test_out_path, generators, skews, num_elementss)
for num_elements in num_elementss:
cmd = "gnuplot {}".format(get_gp_fname(test_out_path, num_elements))
print(cmd)
subprocess.call(shlex.split(cmd), cwd=test_out_path)
if __name__ == "__main__":
main()
``` |
{
"source": "jmherbst/GimelStudio",
"score": 2
} |
#### File: src/gimelstudio/config.py
```python
import os
import json
import gimelstudio.constants as appconst
class AppData(object):
def __init__(self):
self.app_frozen = appconst.APP_FROZEN
self.app_dir = appconst.APP_DIR
self.app_name = appconst.APP_NAME
self.app_website_url = appconst.APP_WEBSITE_URL
self.app_description = appconst.APP_DESCRIPTION
self.app_version = appconst.APP_VERSION
self.app_version_tag = appconst.APP_VERSION_TAG
self.app_version_full = appconst.APP_VERSION_FULL
class AppConfiguration(AppData):
def __init__(self, app):
AppData.__init__(self)
self.app = app
self.prefs = {}
def Config(self, key=None, value=None):
if key is not None and value is not None:
self.prefs[key] = value
else:
return self.prefs[key]
def Load(self):
path = os.path.expanduser("~/.gimelstudio/config.json")
try:
os.makedirs(
os.path.expanduser("~/.gimelstudio/"), exist_ok=True)
with open(path, "r") as file:
self.prefs = json.load(file)
except IOError:
pass # Just use default
def Save(self):
# Add app version to file
self.prefs['app_version'] = self.app_version
path = "~/.gimelstudio/config.json"
try:
with open(
os.path.expanduser(path), "w") as file:
json.dump(self.prefs, file)
except IOError:
pass # Not a big deal
```
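A sketch of how the configuration object above might be used, assuming the `gimelstudio.constants` module is importable; the preference key is illustrative.
```python
config = AppConfiguration(app=None)    # the real caller passes the running application
config.Load()                          # reads ~/.gimelstudio/config.json if it exists

config.Config("ui_theme", "dark")      # set a preference (illustrative key)
print(config.Config("ui_theme"))       # -> "dark"

config.Save()                          # writes preferences plus app_version back to disk
```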
#### File: core/node/property.py
```python
import os
import sys
import glob
import wx
from gswidgetkit import (NumberField, EVT_NUMBERFIELD,
Button, EVT_BUTTON, TextCtrl,
DropDown, EVT_DROPDOWN)
from gimelstudio import constants
from gimelstudio.datafiles import ICON_ARROW_DOWN, ICON_ARROW_RIGHT
# Enum-like constants for widgets
SLIDER_WIDGET = "slider"
SPINBOX_WIDGET = "spinbox"
class Property(object):
"""
The base node property class.
"""
def __init__(self, idname, default, label, visible=True):
self.idname = idname
self.value = default
self.label = label
self.visible = visible
self.widget_eventhook = None
def _RunErrorCheck(self):
pass
@property
def IdName(self): # name
return self.idname
def GetIdname(self):
return self.idname
def GetValue(self):
return self.value
def SetValue(self, value, render=True):
""" Set the value of the node property.
NOTE: This is only to be used to AFTER the node init.
Use ``self.EditProperty`` for other cases, instead.
"""
self.value = value
self._RunErrorCheck()
self.WidgetEventHook(self.idname, self.value, render)
def GetLabel(self):
return self.label
def SetLabel(self, label):
self.label = label
def GetIsVisible(self):
return self.visible
def SetIsVisible(self, is_visible):
self.visible = is_visible
def SetWidgetEventHook(self, event_hook):
self.widget_eventhook = event_hook
def WidgetEventHook(self, idname, value, render):
self.widget_eventhook(idname, value, render)
def CreateFoldPanel(self, panel_bar, label=None):
images = wx.ImageList(24, 24)
images.Add(ICON_ARROW_DOWN.GetBitmap())
images.Add(ICON_ARROW_RIGHT.GetBitmap())
if label is None:
lbl = self.GetLabel()
else:
lbl = label
return panel_bar.AddFoldPanel(lbl, foldIcons=images)
def AddToFoldPanel(self, panel_bar, fold_panel, item, spacing=10):
# From https://discuss.wxpython.org/t/how-do-you-get-the-
# captionbar-from-a-foldpanelbar/24795
fold_panel._captionBar.SetSize(fold_panel._captionBar.DoGetBestSize())
panel_bar.AddFoldPanelWindow(fold_panel, item, spacing=spacing)
class PositiveIntegerProp(Property):
""" Allows the user to select a positive integer. """
def __init__(self, idname, default=0, lbl_suffix="", min_val=0,
max_val=10, widget="slider", label="", visible=True):
Property.__init__(self, idname, default, label, visible)
self.min_value = min_val
self.max_value = max_val
self.widget = widget
self.lbl_suffix = lbl_suffix
self._RunErrorCheck()
def _RunErrorCheck(self):
if self.value > self.max_value:
raise TypeError(
"PositiveIntegerField value must be set to an integer less than 'max_val'"
)
if self.value < self.min_value:
raise TypeError(
"PositiveIntegerField value must be set to an integer greater than 'min_val'"
)
def GetMinValue(self):
return self.min_value
def GetMaxValue(self):
return self.max_value
def CreateUI(self, parent, sizer):
fold_panel = self.CreateFoldPanel(sizer)
fold_panel.SetBackgroundColour(wx.Colour("#464646"))
self.numberfield = NumberField(fold_panel,
default_value=self.GetValue(),
label=self.GetLabel(),
min_value=self.GetMinValue(),
max_value=self.GetMaxValue(),
suffix=self.lbl_suffix, show_p=False,
size=(-1, 32))
self.AddToFoldPanel(sizer, fold_panel, self.numberfield, spacing=10)
self.numberfield.Bind(EVT_NUMBERFIELD, self.WidgetEvent)
def WidgetEvent(self, event):
self.SetValue(event.value)
class ChoiceProp(Property):
""" Allows the user to select from a list of choices. """
def __init__(self, idname, default="", choices=[], label="", visible=True):
Property.__init__(self, idname, default, label, visible)
self.choices = choices
self._RunErrorCheck()
def GetChoices(self):
return self.choices
def SetChoices(self, choices=[]):
self.choices = choices
def CreateUI(self, parent, sizer):
fold_panel = self.CreateFoldPanel(sizer)
fold_panel.SetBackgroundColour(wx.Colour("#464646"))
self.dropdown = DropDown(
fold_panel,
default=self.GetValue(),
items=self.GetChoices(),
size=(-1, 32)
)
self.AddToFoldPanel(sizer, fold_panel, self.dropdown, spacing=10)
self.dropdown.Bind(EVT_DROPDOWN, self.WidgetEvent)
def WidgetEvent(self, event):
value = event.value
if not value:
print("Value is null!")
self.SetValue(value)
class OpenFileChooserProp(Property):
""" Allows the user to select a file to open.
(e.g: use this to open an .PNG, .JPG, .JPEG image, etc.)
"""
def __init__(self, idname, default="", dlg_msg="Choose file...",
wildcard="All files (*.*)|*.*", btn_lbl="Choose...",
label="", visible=True):
Property.__init__(self, idname, default, label, visible)
self.dlg_msg = dlg_msg
self.wildcard = wildcard
self.btn_lbl = btn_lbl
self._RunErrorCheck()
def _RunErrorCheck(self):
if type(self.value) != str:
raise TypeError("OpenFileChooserField value must be a string!")
def GetDlgMessage(self):
return self.dlg_msg
def GetWildcard(self):
return self.wildcard
def GetBtnLabel(self):
return self.btn_lbl
def CreateUI(self, parent, sizer):
fold_panel = self.CreateFoldPanel(sizer)
pnl = wx.Panel(fold_panel)
pnl.SetBackgroundColour(wx.Colour("#464646"))
vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.textcontrol = TextCtrl(pnl,
value=self.GetValue(), style=wx.BORDER_SIMPLE,
placeholder="", size=(-1, 32))
hbox.Add(self.textcontrol, proportion=1, flag=wx.EXPAND | wx.BOTH)
self.button = Button(pnl, label=self.GetBtnLabel(), size=(-1, 32))
hbox.Add(self.button, flag=wx.LEFT, border=5)
self.button.Bind(EVT_BUTTON, self.WidgetEvent)
vbox.Add(hbox, flag=wx.EXPAND | wx.BOTH)
vbox.Fit(pnl)
pnl.SetSizer(vbox)
self.AddToFoldPanel(sizer, fold_panel, pnl, spacing=10)
def WidgetEvent(self, event):
dlg = wx.FileDialog(
None,
message=self.GetDlgMessage(),
defaultDir=os.getcwd(),
defaultFile="",
wildcard=self.GetWildcard(),
style=wx.FD_OPEN | wx.FD_CHANGE_DIR | wx.FD_FILE_MUST_EXIST | wx.FD_PREVIEW
)
if dlg.ShowModal() == wx.ID_OK:
paths = dlg.GetPaths()
filetype = os.path.splitext(paths[0])[1]
if filetype not in constants.SUPPORTED_FT_OPEN_LIST:
dlg = wx.MessageDialog(
None,
"That file type isn't currently supported!",
"Cannot Open Image!",
style=wx.ICON_EXCLAMATION
)
dlg.ShowModal()
else:
self.SetValue(paths[0])
self.textcontrol.ChangeValue(self.GetValue())
class LabelProp(Property):
""" Allows setting and resetting text on a label. """
def __init__(self, idname, default="", label="", visible=True):
Property.__init__(self, idname, default, label, visible)
self._RunErrorCheck()
def CreateUI(self, parent, sizer):
label = wx.StaticText(parent, label=self.GetLabel())
label.SetForegroundColour("#fff")
sizer.Add(label, flag=wx.LEFT | wx.TOP, border=5)
static_label = wx.StaticText(parent, label=self.GetValue())
static_label.SetForegroundColour("#fff")
sizer.Add(static_label, flag=wx.LEFT | wx.TOP, border=5)
class StringProp(Property):
def __init__(self, idname, default="Text", dlg_msg="Edit text:",
dlg_title="Edit Text", label="", visible=True):
Property.__init__(self, idname, default, label, visible)
self.dlg_msg = dlg_msg
self.dlg_title = dlg_title
self._RunErrorCheck()
def GetDlgMessage(self):
return self.dlg_msg
def GetDlgTitle(self):
return self.dlg_title
def CreateUI(self, parent, sizer):
label = wx.StaticText(parent, label=self.GetLabel())
label.SetForegroundColour("#fff")
sizer.Add(label, flag=wx.LEFT | wx.TOP, border=5)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.textcontrol = wx.TextCtrl(
parent,
id=wx.ID_ANY,
value=self.GetValue(),
style=wx.TE_READONLY
)
hbox.Add(self.textcontrol, proportion=1)
self.button = wx.Button(
parent,
id=wx.ID_ANY,
label="Edit"
)
hbox.Add(self.button, flag=wx.LEFT, border=5)
self.button.Bind(
wx.EVT_BUTTON,
self.WidgetEvent
)
vbox.Add(hbox, flag=wx.EXPAND)
sizer.Add(vbox, flag=wx.ALL | wx.EXPAND, border=5)
def WidgetEvent(self, event):
dlg = wx.TextEntryDialog(None, self.GetDlgMessage(),
self.GetDlgTitle(), self.GetValue())
if dlg.ShowModal() == wx.ID_OK:
value = dlg.GetValue()
self.SetValue(value)
self.textcontrol.ChangeValue(self.GetValue())
```
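A minimal sketch of defining node properties with the classes above, outside of any wx UI; the idnames, ranges, and choices are illustrative.
```python
# Properties are plain data holders until CreateUI() attaches widgets to them.
radius = PositiveIntegerProp("radius", default=4, min_val=0, max_val=25,
                             label="Blur radius")
mode = ChoiceProp("mode", default="Box", choices=["Box", "Gaussian"],
                  label="Blur mode")

# A node would normally register a hook so value changes trigger a re-render.
radius.SetWidgetEventHook(lambda idname, value, render: print(idname, value, render))
radius.SetValue(8)         # re-validates the range and prints: radius 8 True
print(mode.GetChoices())   # ['Box', 'Gaussian']
```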
#### File: interface/artproviders/menubar.py
```python
import math
import wx
import wx.lib.agw.flatmenu as flatmenu
from wx.lib.agw.artmanager import ArtManager, RendererBase, DCSaver
from wx.lib.agw.fmresources import ControlFocus, ControlPressed, menu_right_arrow_xpm
def switchRGBtoBGR(colour):
return wx.Colour(colour.Blue(), colour.Green(), colour.Red())
class UIMenuBarRenderer(flatmenu.FMRenderer):
def __init__(self):
flatmenu.FMRenderer.__init__(self)
self.highlightCheckAndRadio = True
self.menuFaceColour = wx.Colour("#252525")
self.menuBarFaceColour = wx.Colour("#252525")
self.menuBarFocusFaceColour = wx.Colour("#5874C5")
self.menuBarFocusBorderColour = wx.Colour("#5874C5")
self.menuBarPressedFaceColour = wx.Colour("#5874C5")
self.menuBarPressedBorderColour = wx.Colour("#5874C5")
self.menuFocusFaceColour = wx.Colour("#5874C5")
self.menuFocusBorderColour = wx.Colour("#5874C5")
self.menuPressedFaceColour = wx.Colour("#5874C5")
self.menuPressedBorderColour = wx.Colour("#5874C5")
self.buttonFaceColour = wx.Colour("#5874C5")
self.buttonBorderColour = wx.Colour("#5874C5")
self.buttonFocusFaceColour = wx.Colour("#5874C5")
self.buttonFocusBorderColour = wx.Colour("#5874C5")
self.buttonPressedFaceColour = wx.Colour("#5874C5")
self.buttonPressedBorderColour = wx.Colour("#5874C5")
def DrawMenuItem(self, item, dc, xCoord, yCoord, imageMarginX, markerMarginX, textX, rightMarginX, selected=False, backgroundImage=None):
"""
Draws the menu item.
:param `item`: a :class:`FlatMenuItem` instance;
:param `dc`: an instance of :class:`wx.DC`;
:param integer `xCoord`: the current x position where to draw the menu;
:param integer `yCoord`: the current y position where to draw the menu;
:param integer `imageMarginX`: the spacing between the image and the menu border;
:param integer `markerMarginX`: the spacing between the checkbox/radio marker and
the menu border;
:param integer `textX`: the menu item label x position;
:param integer `rightMarginX`: the right margin between the text and the menu border;
:param bool `selected`: ``True`` if this menu item is currently hovered by the mouse,
``False`` otherwise.
:param `backgroundImage`: if not ``None``, an instance of :class:`wx.Bitmap` which will
become the background image for this :class:`FlatMenu`.
"""
borderXSize = item._parentMenu.GetBorderXWidth()
itemHeight = item._parentMenu.GetItemHeight()
menuWidth = item._parentMenu.GetMenuWidth()
# Define the item actual rectangle area
itemRect = wx.Rect(xCoord, yCoord, menuWidth, itemHeight)
# Define the drawing area
rect = wx.Rect(xCoord + 2, yCoord, menuWidth - 4, itemHeight)
# Draw the background
backColour = self.menuFaceColour
penColour = backColour
backBrush = wx.Brush(backColour)
leftMarginWidth = item._parentMenu.GetLeftMarginWidth()
if backgroundImage is None:
pen = wx.Pen(penColour)
dc.SetPen(pen)
dc.SetBrush(backBrush)
dc.DrawRectangle(rect)
# Draw the left margin gradient
if self.drawLeftMargin:
self.DrawLeftMargin(item, dc, itemRect)
# check if separator
if item.IsSeparator():
# Separator is a small grey line separating between menu items.
sepWidth = xCoord + menuWidth - textX - 1
self.DrawSeparator(dc, xCoord, yCoord, textX, sepWidth)
return
# Keep the item rect
item._rect = itemRect
# Get the bitmap base on the item state (disabled, selected ..)
bmp = item.GetSuitableBitmap(selected)
# First we draw the selection rectangle
if selected:
self.DrawMenuButton(dc, rect.Deflate(1, 0), ControlFocus)
#copy.Inflate(0, menubar._spacer)
if bmp.IsOk():
# Calculate the postion to place the image
imgHeight = bmp.GetHeight()
imgWidth = bmp.GetWidth()
if imageMarginX == 0:
xx = rect.x + (leftMarginWidth - imgWidth) / 2
else:
xx = rect.x + ((leftMarginWidth - rect.height) - imgWidth) / 2 + rect.height
yy = rect.y + (rect.height - imgHeight) / 2
dc.DrawBitmap(bmp, xx, yy, True)
if item.GetKind() == wx.ITEM_CHECK:
# Checkable item
if item.IsChecked():
# Draw surrounding rectangle around the selection box
xx = rect.x + 1
yy = rect.y + 1
rr = wx.Rect(xx, yy, rect.height - 2, rect.height - 2)
if not selected and self.highlightCheckAndRadio:
self.DrawButton(dc, rr, ControlFocus)
dc.DrawBitmap(item._checkMarkBmp, rr.x + (rr.width - 16) / 2, rr.y + (rr.height - 16) / 2, True)
if item.GetKind() == wx.ITEM_RADIO:
# Checkable item
if item.IsChecked():
# Draw surrounding rectangle around the selection box
xx = rect.x + 1
yy = rect.y + 1
rr = wx.Rect(xx, yy, rect.height - 2, rect.height - 2)
if not selected and self.highlightCheckAndRadio:
self.DrawButton(dc, rr, ControlFocus)
dc.DrawBitmap(item._radioMarkBmp, rr.x + (rr.width - 16) / 2, rr.y + (rr.height - 16) / 2, True)
# Draw text - without accelerators
text = item.GetLabel()
if text:
font = item.GetFont()
if font is None:
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
# EDITED - This is my edit to always have the font color white:
enabledTxtColour = wx.Colour("#fff")
disabledTxtColour = self.itemTextColourDisabled
textColour = (item.IsEnabled() and [enabledTxtColour] or [disabledTxtColour])[0]
if item.IsEnabled() and item.GetTextColour():
textColour = item.GetTextColour()
dc.SetFont(font)
w, h = dc.GetTextExtent(text)
dc.SetTextForeground(textColour)
if item._mnemonicIdx != wx.NOT_FOUND:
# We divide the drawing to 3 parts
text1 = text[0:item._mnemonicIdx]
text2 = text[item._mnemonicIdx]
text3 = text[item._mnemonicIdx + 1:]
w1, dummy = dc.GetTextExtent(text1)
w2, dummy = dc.GetTextExtent(text2)
w3, dummy = dc.GetTextExtent(text3)
posx = xCoord + textX + borderXSize
posy = (itemHeight - h) / 2 + yCoord
# Draw first part
dc.DrawText(text1, posx, posy)
# mnemonic
if "__WXGTK__" not in wx.Platform:
font.SetUnderlined(True)
dc.SetFont(font)
posx += w1
dc.DrawText(text2, posx, posy)
# last part
font.SetUnderlined(False)
dc.SetFont(font)
posx += w2
dc.DrawText(text3, posx, posy)
else:
w, h = dc.GetTextExtent(text)
dc.DrawText(text, xCoord + textX + borderXSize, (itemHeight - h) / 2 + yCoord)
# Now draw accelerator
# Accelerators are aligned to the right
if item.GetAccelString():
accelWidth, accelHeight = dc.GetTextExtent(item.GetAccelString())
dc.DrawText(item.GetAccelString(), xCoord + rightMarginX -
accelWidth, (itemHeight - accelHeight) / 2 + yCoord)
# Check if this item has sub-menu - if it does, draw
# right arrow on the right margin
if item.GetSubMenu():
# Draw arrow
rightArrowBmp = wx.Bitmap(menu_right_arrow_xpm)
rightArrowBmp.SetMask(wx.Mask(rightArrowBmp, wx.WHITE))
xx = xCoord + rightMarginX + borderXSize
rr = wx.Rect(xx, rect.y + 1, rect.height - 2, rect.height - 2)
dc.DrawBitmap(rightArrowBmp, rr.x + 4, rr.y + (rr.height - 16) / 2, True)
def DrawMenuBar(self, menubar, dc):
"""
Draws everything for :class:`FlatMenuBar`.
:param `menubar`: an instance of :class:`FlatMenuBar`.
:param `dc`: an instance of :class:`wx.DC`.
"""
#artMgr = ArtManager.Get()
fnt = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
# EDITED - This is my edit to always make the font color white
textColour = wx.Colour("#fff")
highlightTextColour = wx.Colour("#fff")
dc.SetFont(fnt)
dc.SetTextForeground(textColour)
clientRect = menubar.GetClientRect()
self.DrawMenuBarBackground(dc, clientRect)
padding, dummy = dc.GetTextExtent("W")
posx = 0
posy = menubar._margin
# Monkey-patch padding between menus
padding += 11
# ---------------------------------------------------------------------------
# Draw as much items as we can if the screen is not wide enough, add all
# missing items to a drop down menu
# ---------------------------------------------------------------------------
menuBarRect = menubar.GetClientRect()
# mark all items as non-visibles at first
for item in menubar._items:
item.SetRect(wx.Rect())
for item in menubar._items:
# Handle accelerator ('&')
title = item.GetTitle()
fixedText = title
location, labelOnly = flatmenu.GetAccelIndex(fixedText)
# Get the menu item rect
textWidth, textHeight = dc.GetTextExtent(fixedText)
#rect = wx.Rect(posx+menubar._spacer/2, posy, textWidth, textHeight)
rect = wx.Rect(posx + padding / 2, posy, textWidth, textHeight)
# Can we draw more??
# the +DROP_DOWN_ARROW_WIDTH is the width of the drop down arrow
if posx + rect.width + flatmenu.DROP_DOWN_ARROW_WIDTH >= menuBarRect.width:
break
# In this style the button highlight includes the menubar margin
button_rect = wx.Rect(*rect)
button_rect.height = menubar._menuBarHeight
#button_rect.width = rect.width + menubar._spacer
button_rect.width = rect.width + padding
button_rect.x = posx
button_rect.y = 0
# Keep the item rectangle, will be used later in functions such
# as 'OnLeftDown', 'OnMouseMove'
copy = wx.Rect(*button_rect)
#copy.Inflate(0, menubar._spacer)
item.SetRect(copy)
selected = False
if item.GetState() == ControlFocus:
self.DrawMenuBarButton(dc, button_rect, ControlFocus)
dc.SetTextForeground(highlightTextColour)
selected = True
else:
dc.SetTextForeground(textColour)
ww, hh = dc.GetTextExtent(labelOnly)
textOffset = (rect.width - ww) / 2
if not menubar._isLCD and item.GetTextBitmap().IsOk() and not selected:
dc.DrawBitmap(item.GetTextBitmap(), rect.x, rect.y, True)
elif not menubar._isLCD and item.GetSelectedTextBitmap().IsOk() and selected:
dc.DrawBitmap(item.GetSelectedTextBitmap(), rect.x, rect.y, True)
else:
if not menubar._isLCD:
# Draw the text on a bitmap using memory dc,
# so on following calls we will use this bitmap instead
# of calculating everything from scratch
bmp = wx.Bitmap(rect.width, rect.height)
memDc = wx.MemoryDC()
memDc.SelectObject(bmp)
if selected:
memDc.SetTextForeground(highlightTextColour)
else:
memDc.SetTextForeground(textColour)
# Fill the bitmap with the masking colour
memDc.SetPen(wx.Pen(wx.Colour(255, 0, 0)))
memDc.SetBrush(wx.Brush(wx.Colour(255, 0, 0)))
memDc.DrawRectangle(0, 0, rect.width, rect.height)
memDc.SetFont(fnt)
if location == wx.NOT_FOUND or location >= len(fixedText):
# draw the text
if not menubar._isLCD:
memDc.DrawText(title, textOffset, 0)
dc.DrawText(title, rect.x + textOffset, rect.y)
else:
# underline the first '&'
before = labelOnly[0:location]
underlineLetter = labelOnly[location]
after = labelOnly[location + 1:]
# before
if not menubar._isLCD:
memDc.DrawText(before, textOffset, 0)
dc.DrawText(before, rect.x + textOffset, rect.y)
# underlineLetter
if "__WXGTK__" not in wx.Platform:
w1, h = dc.GetTextExtent(before)
fnt.SetUnderlined(True)
dc.SetFont(fnt)
dc.DrawText(underlineLetter, rect.x + w1 + textOffset, rect.y)
if not menubar._isLCD:
memDc.SetFont(fnt)
memDc.DrawText(underlineLetter, textOffset + w1, 0)
else:
w1, h = dc.GetTextExtent(before)
dc.DrawText(underlineLetter, rect.x + w1 + textOffset, rect.y)
if not menubar._isLCD:
memDc.DrawText(underlineLetter, textOffset + w1, 0)
# Draw the underline ourselves since using the Underline in GTK,
# causes the line to be too close to the letter
uderlineLetterW, uderlineLetterH = dc.GetTextExtent(underlineLetter)
dc.DrawLine(rect.x + w1 + textOffset, rect.y + uderlineLetterH - 2,
rect.x + w1 + textOffset + uderlineLetterW, rect.y + uderlineLetterH - 2)
# after
w2, h = dc.GetTextExtent(underlineLetter)
fnt.SetUnderlined(False)
dc.SetFont(fnt)
dc.DrawText(after, rect.x + w1 + w2 + textOffset, rect.y)
if not menubar._isLCD:
memDc.SetFont(fnt)
memDc.DrawText(after, w1 + w2 + textOffset, 0)
if not menubar._isLCD:
memDc.SelectObject(wx.NullBitmap)
# Set masking colour to the bitmap
bmp.SetMask(wx.Mask(bmp, wx.Colour(255, 0, 0)))
if selected:
item.SetSelectedTextBitmap(bmp)
else:
item.SetTextBitmap(bmp)
posx += rect.width + padding # + menubar._spacer
# Get a background image of the more menu button
moreMenubtnBgBmpRect = wx.Rect(*menubar.GetMoreMenuButtonRect())
if not menubar._moreMenuBgBmp:
menubar._moreMenuBgBmp = wx.Bitmap(moreMenubtnBgBmpRect.width, moreMenubtnBgBmpRect.height)
if menubar._showToolbar and len(menubar._tbButtons) > 0:
rectX = 0
rectWidth = clientRect.width - moreMenubtnBgBmpRect.width
if len(menubar._items) == 0:
rectHeight = clientRect.height
rectY = 0
else:
rectHeight = clientRect.height - menubar._menuBarHeight
rectY = menubar._menuBarHeight
rr = wx.Rect(rectX, rectY, rectWidth, rectHeight)
self.DrawToolBarBg(dc, rr)
menubar.DrawToolbar(dc, rr)
if menubar._showCustomize or menubar.GetInvisibleMenuItemCount() > 0 or menubar.GetInvisibleToolbarItemCount() > 0:
memDc = wx.MemoryDC()
memDc.SelectObject(menubar._moreMenuBgBmp)
try:
memDc.Blit(0, 0, menubar._moreMenuBgBmp.GetWidth(), menubar._moreMenuBgBmp.GetHeight(), dc,
moreMenubtnBgBmpRect.x, moreMenubtnBgBmpRect.y)
except:
pass
memDc.SelectObject(wx.NullBitmap)
# Draw the drop down arrow button
menubar.DrawMoreButton(dc, menubar._dropDownButtonState)
# Set the button rect
menubar._dropDownButtonArea = moreMenubtnBgBmpRect
def DrawMenuBarButton(self, dc, rect, state):
"""
Draws the highlight on a :class:`FlatMenuBar`.
:param `dc`: an instance of :class:`wx.DC`;
:param `rect`: an instance of :class:`wx.Rect`, representing the button client rectangle;
:param integer `state`: the button state.
"""
# switch according to the status
if state == ControlFocus:
penColour = self.menuBarFocusBorderColour
brushColour = self.menuBarFocusFaceColour
elif state == ControlPressed:
penColour = self.menuBarPressedBorderColour
brushColour = self.menuBarPressedFaceColour
dcsaver = DCSaver(dc)
dc.SetPen(wx.Pen(penColour))
dc.SetBrush(wx.Brush(brushColour))
dc.DrawRoundedRectangle(rect, 3)
def DrawMenuButton(self, dc, rect, state):
"""
Draws the highlight on a :class:`FlatMenu`.
:param `dc`: an instance of :class:`wx.DC`;
:param `rect`: an instance of :class:`wx.Rect`, representing the button client rectangle;
:param integer `state`: the button state.
"""
# switch according to the status
if state == ControlFocus:
penColour = self.menuFocusBorderColour
brushColour = self.menuFocusFaceColour
elif state == ControlPressed:
penColour = self.menuPressedBorderColour
brushColour = self.menuPressedFaceColour
dcsaver = DCSaver(dc)
dc.SetPen(wx.Pen(penColour))
dc.SetBrush(wx.Brush(brushColour))
dc.DrawRoundedRectangle(rect, 3)
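# Illustrative sketch (not part of the original renderer; the instance name and
# sizes below are hypothetical): the two highlight methods above expect a live
# wx.DC, but they can also be exercised off-screen, e.g.:
# bmp = wx.Bitmap(100, 24)
# mdc = wx.MemoryDC(bmp)
# renderer.DrawMenuBarButton(mdc, wx.Rect(0, 0, 100, 24), ControlFocus)
# mdc.SelectObject(wx.NullBitmap)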
def DrawMenu(self, flatmenu, dc):
"""
Draws the menu.
:param `flatmenu`: the :class:`FlatMenu` instance we need to paint;
:param `dc`: an instance of :class:`wx.DC`.
"""
menuRect = flatmenu.GetClientRect()
menuBmp = wx.Bitmap(menuRect.width, menuRect.height)
mem_dc = wx.MemoryDC()
mem_dc.SelectObject(menuBmp)
# colour the menu face with background colour
backColour = self.menuFaceColour
backBrush = wx.Brush(backColour)
pen = wx.Pen(wx.TRANSPARENT_PEN)
mem_dc.SetPen(pen)
mem_dc.SetBrush(backBrush)
mem_dc.DrawRectangle(menuRect)
backgroundImage = flatmenu._backgroundImage
if backgroundImage:
mem_dc.DrawBitmap(backgroundImage, flatmenu._leftMarginWidth, 0, True)
# draw items
posy = 3
nItems = len(flatmenu._itemsArr)
# make all items as non-visible first
for item in flatmenu._itemsArr:
item.Show(False)
visibleItems = 0
screenHeight = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_Y)
numCols = flatmenu.GetNumberColumns()
switch, posx, index = 1e6, 0, 0
if numCols > 1:
switch = int(math.ceil((nItems - flatmenu._first)/float(numCols)))
# If we have to scroll and are not using the scroll bar buttons we need to draw
# the scroll up menu item at the top.
if not self.scrollBarButtons and flatmenu._showScrollButtons:
posy += flatmenu.GetItemHeight()
for nCount in range(flatmenu._first, nItems):
visibleItems += 1
item = flatmenu._itemsArr[nCount]
self.DrawMenuItem(item, mem_dc, posx, posy,
flatmenu._imgMarginX, flatmenu._markerMarginX,
flatmenu._textX, flatmenu._rightMarginPosX,
nCount == flatmenu._selectedItem,
backgroundImage=backgroundImage)
posy += item.GetHeight()
item.Show()
if visibleItems >= switch:
posy = 2
index += 1
posx = flatmenu._menuWidth*index
visibleItems = 0
# make sure we draw only visible items
pp = flatmenu.ClientToScreen(wx.Point(0, posy))
menuBottom = (self.scrollBarButtons and [pp.y] or [pp.y + flatmenu.GetItemHeight()*2])[0]
if menuBottom > screenHeight:
break
if flatmenu._showScrollButtons:
if flatmenu._upButton:
flatmenu._upButton.Draw(mem_dc)
if flatmenu._downButton:
flatmenu._downButton.Draw(mem_dc)
dc.Blit(0, 0, menuBmp.GetWidth(), menuBmp.GetHeight(), mem_dc, 0, 0)
``` |
{
"source": "jmhernan/NIreland_NLP",
"score": 3
} |
#### File: NIreland_NLP/class_nn/embeddings_google.py
```python
import numpy as np
import pandas as pd
import nltk
from keras.utils.np_utils import to_categorical
from keras.preprocessing.text import Tokenizer
from keras import models
from keras import layers
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from keras import utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
import os
from nltk.stem import PorterStemmer
from sklearn.model_selection import train_test_split
from gensim.models import KeyedVectors
## Set the file pathway and download corpus
this_file_path = os.path.abspath(__file__)
folder_root = os.path.split(this_file_path)[0]
repo_root = os.path.split(folder_root)[0]
repo_path = os.path.join(repo_root)
PATH_TO_GV = os.path.join(folder_root, 'wordvec') + '/'
df = pd.read_csv(os.path.join(repo_path, 'justifications_clean_text_ohe.csv'))
# Collapse justification categories from 12 to 6 -- approach #2
df['just_category_6'] = df['justification_cat']
df['just_category_6'] = df['just_category_6'].replace(['J_Emergency-Policy', 'J_Intelligence', 'J_Last-resort', 'J_Utilitarian-Deterrence', 'J_Law-and-order'], 'J_Security')
df['just_category_6'] = df['just_category_6'].replace(['J_Legal_Procedure'], 'J_Legal')
df['just_category_6'] = df['just_category_6'].replace(['J_Political-Strategic'], 'J_Political')
df['just_category_6'] = df['just_category_6'].replace(['J_Denial', 'J_Intl-Domestic_Precedent'], 'J_DenyHRVio') #
df['just_category_6'] = df['just_category_6'].replace(['J_Development-Unity'], 'J_Misc')
df['just_categories'] = df['just_category_6']
# Create a unique number id for each justification category
col = ['just_categories', 'clean_text']
df = df[col]
df = df[pd.notnull(df['clean_text'])]
df.columns = ['just_categories', 'clean_text']
df['category_id'] = df['just_categories'].factorize()[0]
category_id_df = df[['just_categories', 'category_id']].drop_duplicates().sort_values('category_id')
category_to_id = dict(category_id_df.values)
id_to_category = dict(category_id_df[['category_id', 'just_categories']].values)
df.head()
######################################
### Stem sentences outside of grid ###
######################################
ps = PorterStemmer()
def stem_sentences(sentence):
tokens = sentence.split()
stemmed_tokens = [ps.stem(token) for token in tokens]
return ' '.join(stemmed_tokens)
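# Illustrative example (hypothetical sentence): stem_sentences("internments were justified")
# returns the Porter-stemmed tokens re-joined with spaces, roughly "intern were justifi".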
df['stem_text'] = df['clean_text'].apply(stem_sentences)
#############################################
### Divide into training and testing data ###
#############################################
#sentences = df['stem_text'].values # include stopwords, stemmed
sentences = df['clean_text'] # include stopwords, unstemmed
y = df['just_categories']
tokenizer = Tokenizer()
tokenizer.fit_on_texts(sentences)
sequences = tokenizer.texts_to_sequences(sentences)
word_index = tokenizer.word_index # mapping from word to token id, ordered from most to least frequent
print('Found %s unique tokens.' % len(word_index))
max_words = 5153 # total words of vocabulary we will consider
num_words = [len(words.split()) for words in sentences]
max_seq_len = max(num_words) + 1
from keras.preprocessing.sequence import pad_sequences
text_tok = pad_sequences(sequences, maxlen=max_seq_len+1)
text_tok.shape
np.mean(text_tok > 0)
from keras.utils import to_categorical
encoder = LabelEncoder()
encoder.fit(y)
labels = encoder.transform(y)
num_classes = np.max(labels) + 1
labels = utils.to_categorical(labels, num_classes)
print('Shape of data tensor:', text_tok.shape)
print('Shape of label tensor:', labels.shape)
# split training data into test, validation
x_train, x_test, y_train, y_test = train_test_split(text_tok, labels, test_size=0.2, random_state = 42)
# Prepare embedding matrix
word_vector_dim=100
vocabulary_size= max_words+1
embedding_matrix = np.zeros((vocabulary_size, word_vector_dim))
nb_filters = 64
filter_size_a = 2
drop_rate = 0.5
my_optimizer = 'adam'
from keras.layers import Input, Embedding, Dropout, Conv1D, GlobalMaxPooling1D, Dense, Concatenate, MaxPooling1D, Flatten
from keras.models import Model, load_model
from keras.layers import SpatialDropout1D
my_input = Input(shape=(None,))
embedding = Embedding(input_dim=embedding_matrix.shape[0], input_length=max_seq_len,
output_dim=word_vector_dim, trainable=True,)(my_input)
x = Conv1D(filters = nb_filters, kernel_size = filter_size_a,
activation = 'relu',)(embedding)
x = SpatialDropout1D(drop_rate)(x)
x = MaxPooling1D(pool_size=5)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
prob = Dense(6, activation = 'softmax',)(x)
model = Model(my_input, prob)
model.compile(loss='categorical_crossentropy', optimizer = my_optimizer,
metrics = ['accuracy'])
model.fit(x_train, y_train, # Target vector
epochs=20, # Number of training epochs
verbose=1, # Show per-epoch progress
batch_size=100, # Number of observations per batch
validation_data=(x_test, y_test))
# add the google embeddings
# Prepare embedding matrix
word_vectors = KeyedVectors.load_word2vec_format(PATH_TO_GV + 'GoogleNews-vectors-negative300.bin', binary=True)
word_vector_dim=300
vocabulary_size= max_words + 1
embedding_matrix = np.zeros((vocabulary_size, word_vector_dim))
for word, i in word_index.items():
if i>=max_words:
continue
try:
embedding_vector = word_vectors[word]
embedding_matrix[i] = embedding_vector
except KeyError:
embedding_matrix[i]=np.random.normal(0,np.sqrt(0.25),word_vector_dim)
len(embedding_matrix)
embedding_matrix.shape
type(embedding_matrix)
nonzero_elements = np.count_nonzero(np.count_nonzero(embedding_matrix, axis=1))
nonzero_elements / max_words
# Setting parameters for the NN
nb_filters = 128
filter_size_a = 3
drop_rate = 0.5
my_optimizer = 'adam'
from keras.layers import Input, Embedding, Dropout, Conv1D, GlobalMaxPooling1D, Dense, Concatenate, MaxPooling1D, Flatten
from keras.models import Model, load_model
## Build the neural network
my_input = Input(shape=(max_seq_len+1,))
embedding = Embedding(input_dim=embedding_matrix.shape[0], # vocab size, including the 0-th word used for padding
output_dim=word_vector_dim,
weights=[embedding_matrix], # we pass our pre-trained embeddings
input_length=max_seq_len+1,
trainable=True
)(my_input)
# Kernel size is the convolution window: the number of consecutive word vectors combined at a time from each sentence.
x = Conv1D(filters = nb_filters, kernel_size = filter_size_a,
activation = 'relu',)(embedding)
x = MaxPooling1D(pool_size=5)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
prob = Dense(6, activation = 'softmax',)(x)
model = Model(my_input, prob)
model.compile(loss='categorical_crossentropy', optimizer = my_optimizer,
metrics = ['accuracy'])
x = model.fit(x_train, y_train, # Target vector
epochs=20, # Number of training epochs
verbose=1, # Show per-epoch progress
batch_size=100, # Number of observations per batch
validation_data=(x_test, y_test))
``` |
{
"source": "jmhessel/FIghtingWords",
"score": 3
} |
#### File: jmhessel/FIghtingWords/fighting_words.py
```python
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer as CV
import string
exclude = set(string.punctuation)
def basic_sanitize(in_string):
'''Returns a very roughly sanitized version of the input string.'''
return_string = ' '.join(in_string.encode('ascii', 'ignore').strip().split())
return_string = ''.join(ch for ch in return_string if ch not in exclude)
return_string = return_string.lower()
return_string = ' '.join(return_string.split())
return return_string
def bayes_compare_language(l1, l2, ngram = 1, prior=.01, cv = None):
'''
Arguments:
- l1, l2; a list of strings from each language sample
- ngram; an int describing up to what n gram you want to consider (1 is unigrams,
2 is bigrams + unigrams, etc). Ignored if a custom CountVectorizer is passed.
- prior; either a float describing a uniform prior, or a vector describing a prior
over vocabulary items. If you're using a predefined vocabulary, make sure to specify that
when you make your CountVectorizer object.
- cv; a sklearn.feature_extraction.text.CountVectorizer object, if desired.
Returns:
- A list of length |Vocab| where each entry is a (n-gram, zscore) tuple.'''
if cv is None and type(prior) is not float:
print "If using a non-uniform prior:"
print "Please also pass a count vectorizer with the vocabulary parameter set."
quit()
l1 = [basic_sanitize(l) for l in l1]
l2 = [basic_sanitize(l) for l in l2]
if cv is None:
cv = CV(decode_error = 'ignore', min_df = 10, max_df = .5, ngram_range=(1,ngram),
binary = False,
max_features = 15000)
counts_mat = cv.fit_transform(l1+l2).toarray()
# Now sum over languages...
vocab_size = len(cv.vocabulary_)
print "Vocab size is {}".format(vocab_size)
if type(prior) is float:
priors = np.array([prior for i in range(vocab_size)])
else:
priors = prior
z_scores = np.empty(priors.shape[0])
count_matrix = np.empty([2, vocab_size], dtype=np.float32)
count_matrix[0, :] = np.sum(counts_mat[:len(l1), :], axis = 0)
count_matrix[1, :] = np.sum(counts_mat[len(l1):, :], axis = 0)
a0 = np.sum(priors)
n1 = 1.*np.sum(count_matrix[0,:])
n2 = 1.*np.sum(count_matrix[1,:])
print "Comparing language..."
for i in range(vocab_size):
#compute delta
term1 = np.log((count_matrix[0,i] + priors[i])/(n1 + a0 - count_matrix[0,i] - priors[i]))
term2 = np.log((count_matrix[1,i] + priors[i])/(n2 + a0 - count_matrix[1,i] - priors[i]))
delta = term1 - term2
#compute variance on delta
var = 1./(count_matrix[0,i] + priors[i]) + 1./(count_matrix[1,i] + priors[i])
#store final score
z_scores[i] = delta/np.sqrt(var)
index_to_term = {v: k for k, v in cv.vocabulary_.items()}
sorted_indices = np.argsort(z_scores)
return_list = []
for i in sorted_indices:
return_list.append((index_to_term[i], z_scores[i]))
return return_list
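# Illustrative usage sketch (the two corpora below are hypothetical):
# corpus_a = ["the cat sat on the mat"] * 200
# corpus_b = ["stock prices rose sharply today"] * 200
# ranked = bayes_compare_language(corpus_a, corpus_b, ngram=1, prior=0.01)
# `ranked` is sorted by z-score: entries at the start of the list are the terms
# most associated with corpus_b (l2), entries at the end with corpus_a (l1).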
``` |
{
"source": "jmhessel/fmpytorch",
"score": 2
} |
#### File: fmpytorch/second_order/second_order_naive.py
```python
import torch
from torch.autograd import Variable
from torch import nn
import time
class SecondOrderInteraction(torch.nn.Module):
def __init__(self, n_feats, n_factors):
super(SecondOrderInteraction, self).__init__()
self.n_feats = n_feats
self.n_factors = n_factors
self.v = nn.Parameter(torch.Tensor(self.n_feats, self.n_factors))
self.v.data.uniform_(-0.01, 0.01)
def forward(self, x):
self.batch_size = x.size()[0]
self.n_feats = x.size()[-1]
self.n_factors = self.v.size()[-1]
output = Variable(x.data.new(self.batch_size, self.n_feats, self.n_feats).zero_())
all_interactions = torch.mm(self.v, self.v.t())
for b in range(self.batch_size):
for i in range(self.n_feats):
for j in range(i+1, self.n_feats):
output[b,i,j] = all_interactions[i,j] * x[b,i] * x[b,j]
res = output.sum(1).sum(1,keepdim=True)
return res
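# Illustrative usage sketch (hypothetical sizes):
# layer = SecondOrderInteraction(n_feats=10, n_factors=3)
# x = Variable(torch.randn(4, 10))
# out = layer(x)  # shape (4, 1): sum of factorized pairwise interactions per example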
```
#### File: fmpytorch/tests/test_second_order.py
```python
from __future__ import print_function
import numpy as np
import torch
import torch.nn.functional as F
from fmpytorch.second_order.second_order_naive import SecondOrderInteraction as SOISlow
from fmpytorch.second_order.second_order_fast import SecondOrderInteraction as SOIFast
from torch.autograd import Variable
INPUT_SIZE = 50
BATCH_SIZE = 32
N_FACTORS = 5
N_TESTS = 10
class ModelSlow(torch.nn.Module):
def __init__(self):
super(ModelSlow, self).__init__()
self.second_order = SOISlow(INPUT_SIZE, N_FACTORS)
def forward(self, x):
x = self.second_order(x)
return x
class ModelFast(torch.nn.Module):
def __init__(self):
super(ModelFast, self).__init__()
self.second_order = SOIFast(INPUT_SIZE, N_FACTORS)
def forward(self, x):
x = self.second_order(x)
return x
def _forward_backward_check(dtype):
np.random.seed(1)
torch.manual_seed(1)
slow = ModelSlow()
np.random.seed(1)
torch.manual_seed(1)
fast = ModelFast()
if dtype is np.float64:
slow.double()
fast.double()
for i in range(N_TESTS):
input = np.random.random((32, INPUT_SIZE)).astype(dtype)
x_slow = Variable(torch.from_numpy(input),
requires_grad=True)
x_fast = Variable(torch.from_numpy(input),
requires_grad=True)
y = Variable(torch.from_numpy(np.random.random((32, 1)).astype(dtype)))
out_slow = slow(x_slow)
out_fast = fast(x_fast)
assert np.allclose(out_slow.data.numpy(),
out_fast.data.numpy()), "Forward passes differed for {}".format(dtype)
loss_slow = F.mse_loss(out_slow, y)
loss_fast = F.mse_loss(out_fast, y)
loss_slow.backward()
loss_fast.backward()
for var_slow, var_fast in zip(slow.parameters(), fast.parameters()):
assert np.allclose(var_slow.grad.data.numpy(),
var_fast.grad.data.numpy()), "Backward passes differed for {}".format(dtype)
assert np.allclose(x_slow.grad.data.numpy(),
x_fast.grad.data.numpy()), "Backward passes differed for {}".format(dtype)
def test_forward_backward_float():
_forward_backward_check(np.float32)
def test_forward_backward_double():
_forward_backward_check(np.float64)
``` |
{
"source": "jmhessel/lxmert",
"score": 3
} |
#### File: data/vg_gqa_imgfeat/convert.py
```python
import argparse
import csv
import base64
import time
import sys
import numpy as np
import tqdm
import json
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf",
"attrs_id", "attrs_conf", "num_boxes", "boxes", "features"]
def item_to_feature_tensors(item):
# 7 location features from
# https://arxiv.org/pdf/1909.11740.pdf
# assume the image has width, height = w, h
# assume the bbox has width, height = wbox, hbox
# [x1 / w, y1 / h, x2 / w, y2 / h, --- normalized coordinates
# wbox/w, hbox/h, --- normalized fractional width/height
# wbox * hbox / (w * h)] --- normalized area
# global w,h
w, h = float(item['img_w']), float(item['img_h'])
# coordinates for lower left/upper right for boxes
x1, y1, x2, y2 = item['boxes'].transpose()
wbox = x2 - x1
hbox = y2 - y1
location_features = [x1 / w, y1 / h, x2 / w,
y2 / h, wbox / w, hbox / h,
wbox * hbox / (w * h)]
location_features = np.vstack(location_features).transpose()
## And, of course, the content features
content_features = item['features']
return (item['img_id'], content_features, location_features)
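# Worked example of the 7 location features (hypothetical numbers): for a
# 100x100 image and a box (x1, y1, x2, y2) = (10, 20, 30, 60), the vector is
# [0.1, 0.2, 0.3, 0.6, 0.2, 0.4, 0.08] -- normalized corners, fractional
# width/height, and fractional area.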
def load_obj_tsv(fname, topk=None):
"""Load object features from tsv file.
:param fname: The path to the tsv file.
:param topk: Only load features for top K images (lines) in the tsv file.
Will load all the features if topk is either -1 or None.
:return: A list of image object features where each feature is a dict.
See FIELDNAMES above for the keys in the feature dict.
via
https://github.com/airsplay/lxmert/blob/master/src/utils.py
"""
data = []
start_time = time.time()
print("Start to load Faster-RCNN detected objects from %s" % fname)
with open(fname) as f:
reader = csv.DictReader(f, FIELDNAMES, delimiter="\t")
for i, item in tqdm.tqdm(enumerate(reader)):
for key in ['img_h', 'img_w', 'num_boxes']:
item[key] = int(item[key])
boxes = item['num_boxes']
if boxes != 36:
print('Warning: image {} (line {}) has {} boxes instead of 36; skipping.'.format(item['img_id'], i, boxes))
continue
decode_config = [
('objects_id', (boxes, ), np.int64),
('objects_conf', (boxes, ), np.float32),
('attrs_id', (boxes, ), np.int64),
('attrs_conf', (boxes, ), np.float32),
('boxes', (boxes, 4), np.float32),
('features', (boxes, -1), np.float32),
]
for key, shape, dtype in decode_config:
item[key] = np.frombuffer(base64.b64decode(item[key]), dtype=dtype)
item[key] = item[key].reshape(shape)
item[key].setflags(write=False)
data.append(item_to_feature_tensors(item))
if topk is not None and len(data) == topk:
break
elapsed_time = time.time() - start_time
print("Loaded %d images in file %s in %d seconds." % (len(data), fname, elapsed_time))
content_features = np.vstack([np.expand_dims(d[1], 0) for d in data])
print("{} GB for content features".format(
content_features.nbytes / 1e9 ))
location_features = np.vstack([np.expand_dims(d[2], 0) for d in data])
id2row = dict([(k, v) for v, k in enumerate([d[0] for d in data])])
return id2row, content_features, location_features
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('obj_tsv')
parser.add_argument('dataset_name')
return parser.parse_args()
def main():
args = parse_args()
id2row, content_features, location_features = load_obj_tsv(args.obj_tsv)
with open('{}_id2row.json'.format(args.dataset_name), 'w') as f:
f.write(json.dumps(id2row))
np.savez('{}_bbox_features.npz'.format(args.dataset_name),
content_features=content_features,
location_features=location_features)
if __name__ == '__main__':
main()
```
#### File: vqa/vqa_lxr955_results/plot_results.py
```python
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
sns.set()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--results',
default='results.txt')
return parser.parse_args()
def main():
args = parse_args()
with open(args.results) as f:
results = [x.strip().split()[-1].split('/') for x in f.readlines()]
results = [(float(x[0]), float(x[1]), float(x[-1])) for x in results]
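# Based on the parsing above, each line of results.txt is assumed to end with a
# whitespace-separated token of '/'-separated accuracies; the first, second and
# last values are used. A hypothetical line: "run_042 68.31/69.02/41.50"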
orig_acc = np.array([r[0] for r in results])
proj_acc = np.array([r[1] for r in results])
mean_acc = np.array([r[2] for r in results])
print('Orig acc: {:.2f}, proj acc: {:.2f}, const acc: {:.2f}'.format(
np.mean(orig_acc), np.mean(proj_acc), np.mean(mean_acc)))
orig_better = np.sum(orig_acc > proj_acc)
plt.scatter(orig_acc, proj_acc)
plt.plot([0, 100], [0, 100], linestyle='--', linewidth=3, color='r')
start = min(np.min(orig_acc) - 1, np.min(proj_acc) - 1)
end = max(np.max(orig_acc) + 1, np.max(proj_acc) + 1)
plt.xlim(start, end)
plt.ylim(start, end)
plt.xlabel('Accuracy Original (win rate={:.0f}%)'.format(orig_better / len(orig_acc) * 100))
plt.ylabel('Accuracy Projected')
plt.tight_layout()
plt.savefig('vqa_results.pdf')
if __name__ == '__main__':
main()
```
#### File: lxmert/src/eval_utils.py
```python
import sklearn.metrics
import sklearn.preprocessing
import numpy as np
import collections
def get_metrics_binary(pred, pred_bin, te_y):
pred, pred_bin, te_y = map(lambda x: np.array(x).flatten(),
[pred, pred_bin, te_y])
if np.sum(np.isnan(pred)) > 0:
return {'f1':-1, 'acc':-1, 'roc_auc':-1, 'precision':-1, 'recall':-1}
f1 = (sklearn.metrics.f1_score(te_y, pred_bin)
if np.mean(pred_bin) != 0 and np.mean(pred_bin) != 1
else 0)
acc = sklearn.metrics.accuracy_score(te_y, pred_bin)
roc_auc = sklearn.metrics.roc_auc_score(te_y, pred)
prec = (sklearn.metrics.precision_score(te_y, pred_bin)
if np.mean(pred_bin) != 0 and np.mean(pred_bin) != 1
else 0)
rec = sklearn.metrics.recall_score(te_y, pred_bin)
return {'f1':f1, 'acc':acc, 'roc_auc':roc_auc, 'precision':prec, 'recall':rec}
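# Illustrative usage sketch (hypothetical arrays):
# scores = np.array([0.9, 0.2, 0.7, 0.4]) # raw probabilities
# preds = (scores > 0.5).astype(int) # thresholded predictions
# labels = np.array([1, 0, 1, 1])
# get_metrics_binary(scores, preds, labels) # -> dict with f1/acc/roc_auc/precision/recall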
def get_metrics_multiclass(pred, pred_clf, te_y):
res = sklearn.metrics.classification_report(te_y, pred_clf, output_dict=True)
classes = set(te_y)
per_class_acc = []
for c in classes:
test_idxs = te_y == c
per_class_acc.append(np.mean(pred_clf[test_idxs] == te_y[test_idxs]))
accuracy = np.mean(pred_clf == te_y)
macro_average_stat = collections.defaultdict(list)
for label, metric_dict in res.items():
if not type(metric_dict) is dict:
continue
for m, v in metric_dict.items():
if m != 'support':
macro_average_stat[m].append(v)
res = {}
for m, stats in macro_average_stat.items():
res['macro_average_' + m] = np.mean(stats)
res['macro_acc'] = np.mean(per_class_acc)
res['acc'] = accuracy
res['macro_auc'] = sklearn.metrics.roc_auc_score(
sklearn.preprocessing.label_binarize(te_y, list(range(len(classes)))),
pred,
)
res['weighted_average_f1'] = sklearn.metrics.f1_score(
te_y,
pred_clf,
average='weighted')
return res
```
#### File: lxmert/src/finetune_param.py
```python
import argparse
import random
import numpy as np
import torch
def get_optimizer(optim):
# Bind the optimizer
if optim == 'rms':
print("Optimizer: Using RMSProp")
optimizer = torch.optim.RMSprop
elif optim == 'adam':
print("Optimizer: Using Adam")
optimizer = torch.optim.Adam
elif optim == 'adamax':
print("Optimizer: Using Adamax")
optimizer = torch.optim.Adamax
elif optim == 'sgd':
print("Optimizer: sgd")
optimizer = torch.optim.SGD
elif 'bert' in optim:
optimizer = 'bert' # The bert optimizer will be bind later.
else:
assert False, "Please add your optimizer %s in the list." % optim
return optimizer
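# Note (added for clarity): get_optimizer returns the optimizer *class*
# (e.g. torch.optim.Adamax for 'adamax'), not an instance; for any string
# containing 'bert' it returns the placeholder string 'bert', and the actual
# BERT optimizer is bound later, as noted above.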
def parse_args():
parser = argparse.ArgumentParser()
# Data Params. These are all required.
parser.add_argument("train_json",
help='comma separated training jsons '
'e.g., "train1.json,train2.json" or "train.json". '
'For None, use -1.')
parser.add_argument("valid_json",
help='comma separated validation jsons. For none, use -1.')
parser.add_argument("test_json",
help='comma separated testing jsons. For none, use -1.')
parser.add_argument("image_feat_tsv", default=None,
help='comma separated tsv files containing extracted '
'image features. '
'e.g., "data/feats1.tsv,data/feats2.tsv" or "vg_gqa_obj36.tsv"')
parser.add_argument('output_dir', type=str, default='output')
# classifier arguments
parser.add_argument("--ans2label", default=None,
help='json dictionary mapping from strings to ints. '
'the strings are the names of the classes, and the '
'ints are their indices.')
parser.add_argument("--use_logits", default=0,
type=int,
help='Should we use the logits in the data file?')
# Training Hyper-parameters
parser.add_argument('--optim', default='bert')
parser.add_argument('--optimize_metric', default='acc',
help='which metric to optimize over the validation set?')
# set to gqa defaults
parser.add_argument('--batchSize', dest='batch_size', type=int, default=32)
parser.add_argument('--lr', type=float, default=1e-5)
parser.add_argument('--epochs', type=int, default=5)
parser.add_argument('--dropout', type=float, default=0.1)
parser.add_argument('--seed', type=int, default=9595, help='random seed')
# Debugging
parser.add_argument("--fast", action='store_const', default=False, const=True)
parser.add_argument("--tiny", action='store_const', default=False, const=True)
parser.add_argument("--tqdm", action='store_const', default=True, const=True)
# Model Loading
parser.add_argument('--load_finetune', type=str, default=None,
help='Load the finetuned model for testing.')
parser.add_argument('--loadLXMERT', dest='load_lxmert', type=str, default=None,
help='Load the pre-trained LXMERT model.')
# LXRT Model Config
# Note: LXRT = L, X, R (three encoders), Transformer
parser.add_argument("--llayers", default=9, type=int, help='Number of Language layers')
parser.add_argument("--xlayers", default=5, type=int, help='Number of CROSS-modality layers.')
parser.add_argument("--rlayers", default=5, type=int, help='Number of object Relationship layers.')
parser.add_argument("--model_type", default='full', type=str,
help='What LXMERT model type should be used?')
# Training configuration
parser.add_argument("--multiGPU", action='store_const', default=False, const=True)
parser.add_argument("--numWorkers", dest='num_workers', default=0)
# Parse the arguments.
args = parser.parse_args()
# we need to set these, but we dont want to make them mutable
args.from_scratch = False
if args.load_lxmert and '_LXRT.pth' in args.load_lxmert:
args.load_lxmert = args.load_lxmert.replace('_LXRT.pth', '')
if args.load_finetune and '.pth' in args.load_finetune:
args.load_finetune = args.load_finetune.replace('.pth', '')
# Bind optimizer class.
args.optimizer = get_optimizer(args.optim)
# Set seeds
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
return args
args = parse_args()
``` |
{
"source": "jmhessel/multi-retrieval",
"score": 3
} |
#### File: jmhessel/multi-retrieval/model_utils.py
```python
import tensorflow as tf
import collections
import sys
def make_get_pos_neg_sims(args, sim_fn):
def get_pos_neg_sims(inp):
'''
Applies the similarity function between all text_idx, img_idx pairs.
inp is a list of three arguments:
- sims: the stacked similarity matrix
- text_n_inp: how many sentences are in each document
- img_n_inp: how many images are in each document
'''
sims, text_n_inp, img_n_inp = inp
text_index_borders = tf.dtypes.cast(tf.cumsum(text_n_inp), tf.int32)
img_index_borders = tf.dtypes.cast(tf.cumsum(img_n_inp), tf.int32)
zero = tf.expand_dims(tf.expand_dims(tf.constant(0, dtype=tf.int32), axis=-1), axis=-1)
# these give the indices of the borders between documents in our big sim matrix...
text_index_borders = tf.concat([zero, text_index_borders], axis=0)
img_index_borders = tf.concat([zero, img_index_borders], axis=0)
doc2pos_sim = {}
doc2neg_img_sims = collections.defaultdict(list)
doc2neg_text_sims = collections.defaultdict(list)
# for each pair of text set and image set...
for text_idx in range(args.docs_per_batch):
for img_idx in range(args.docs_per_batch):
text_start = tf.squeeze(text_index_borders[text_idx])
text_end = tf.squeeze(text_index_borders[text_idx+1])
img_start = tf.squeeze(img_index_borders[img_idx])
img_end = tf.squeeze(img_index_borders[img_idx+1])
cur_sims = sims[text_start:text_end, img_start:img_end]
sim = sim_fn(cur_sims)
if text_idx == img_idx:
doc2pos_sim[text_idx] = sim
else: # negative cases
doc2neg_img_sims[text_idx].append(sim)
doc2neg_text_sims[img_idx].append(sim)
pos_sims, neg_img_sims, neg_text_sims = [], [], []
for idx in range(args.docs_per_batch):
pos_sims.append(doc2pos_sim[idx])
neg_img_sims.append(tf.stack(doc2neg_img_sims[idx]))
neg_text_sims.append(tf.stack(doc2neg_text_sims[idx]))
pos_sims = tf.expand_dims(tf.stack(pos_sims), -1)
neg_img_sims = tf.stack(neg_img_sims)
neg_text_sims = tf.stack(neg_text_sims)
return [pos_sims, neg_img_sims, neg_text_sims]
return get_pos_neg_sims
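# Shape sketch (following the loops above, with D = args.docs_per_batch):
# pos_sims has shape (D, 1) -- the matched text/image similarity per document;
# neg_img_sims and neg_text_sims have shape (D, D-1) -- similarities against
# mismatched images / texts, later used to form the contrastive losses.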
```
#### File: jmhessel/multi-retrieval/train_doc.py
```python
import argparse
import collections
import json
import tensorflow as tf
import numpy as np
import os
import sys
import tqdm
import text_utils
import image_utils
import eval_utils
import model_utils
import training_utils
import bipartite_utils
import pickle
import sklearn.preprocessing
from pprint import pprint
def load_data(fname):
with open(fname) as f:
return json.loads(f.read())
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('documents',
help='json of train/val/test documents.')
parser.add_argument('--image_features',
help='path to pre-extracted image-feature numpy array.')
parser.add_argument('--image_id2row',
help='path to mapping from image id --> numpy row for image features.')
parser.add_argument('--joint_emb_dim',
type=int,
help='Embedding dimension of the shared, multimodal space.',
default=1024)
parser.add_argument('--margin',
type=float,
help='Margin for computing hinge loss.',
default=.2)
parser.add_argument('--seq_len',
type=int,
help='Maximum token sequence length for each sentence before truncation.',
default=20)
parser.add_argument('--docs_per_batch',
type=int,
help='How many docs per batch? 11 docs = 10 negative samples per doc.',
default=11)
parser.add_argument('--neg_mining',
help='What type of negative mining?',
default='hard_negative',
choices=['negative_sample', 'hard_negative'],
type=str)
parser.add_argument('--sim_mode',
help='What similarity function should we use?',
default='AP',
choices=['DC','TK','AP'],
type=str)
parser.add_argument('--sim_mode_k',
help='If --sim_mode=TK/AP, what should the k be? '
'k=-1 for dynamic = min(n_images, n_sentences))? '
'if k > 0, then k=ceil(1./k * min(n_images, n_sentences))',
default=-1,
type=float)
parser.add_argument('--lr',
type=float,
help='Starting learning rate',
default=.0002)
parser.add_argument('--n_epochs',
type=int,
help='How many epochs to run for?',
default=60)
parser.add_argument('--checkpoint_dir',
type=str,
help='What directory to save checkpoints in?',
default='checkpoints')
parser.add_argument('--word2vec_binary',
type=str,
help='If cached word embeddings have not been generated, '
'what is the location of the word2vec binary?',
default=None)
parser.add_argument('--cached_word_embeddings',
type=str,
help='Where are/will the cached word embeddings saved?',
default='cached_word2vec.json')
parser.add_argument('--print_metrics',
type=int,
help='Should we print the metrics if there are ground-truth '
'labels, or no?',
default=0)
parser.add_argument('--cached_vocab',
type=str,
help='Where should we cache the vocab, if anywhere '
'(None means no caching)',
default=None)
parser.add_argument('--output',
type=str,
default=None,
help='If output is set, we will save a pkl file'
'with the validation/test metrics.')
parser.add_argument('--seed',
type=int,
help='Random seed',
default=1)
parser.add_argument('--dropout',
type=float,
default=0.5,
help='How much dropout should we apply?')
parser.add_argument('--subsample_image',
type=int,
default=-1,
help='Should we subsample images to constant lengths? '
'This option is useful if the model is being trained end2end '
'and there are memory issues.')
parser.add_argument('--subsample_text',
type=int,
default=-1,
help='Should we subsample sentences to constant lengths? '
'This option is useful if the model is being trained end2end '
'and there are memory issues.')
parser.add_argument('--rnn_type',
type=str,
default='GRU',
help='What RNN should we use')
parser.add_argument('--end2end',
type=int,
default=0,
help='Should we backprop through the whole vision network?')
parser.add_argument('--image_dir',
type=str,
default=None,
help='What image dir should we use, if end2end?')
parser.add_argument('--lr_patience',
type=int,
default=3,
help='What learning rate patience should we use?')
parser.add_argument('--lr_decay',
type=float,
default=.2,
help='What learning rate decay factor should we use?')
parser.add_argument('--min_lr',
type=float,
default=.0000001,
help='What learning rate decay factor should we use?')
parser.add_argument('--full_image_paths',
type=int,
default=0,
help='For end2end training, should we use full image paths '
'(i.e., is the file extension already on images?)?')
parser.add_argument('--test_eval',
type=int,
help='(DEBUG OPTION) If test_eval >= 1, then training '
'only happens over this many batches',
default=-1)
parser.add_argument('--force',
type=int,
default=0,
help='Should we force the run if the output exists?')
parser.add_argument('--save_predictions',
type=str,
default=None,
help='Should we save the train/val/test predictions? '
'If so --- they will be saved in this directory.')
parser.add_argument('--image_model_checkpoint',
type=str,
default=None,
help='If set, the image model will be initialized from '
'this model checkpoint.')
parser.add_argument('--text_model_checkpoint',
type=str,
default=None,
help='If set, the text model will be initialized from '
'this model checkpoint.')
parser.add_argument('--loss_mode',
help='What loss function should we use?',
default='hinge',
choices=['hinge', 'logistic', 'softmax'],
type=str)
parser.add_argument('--compute_mscoco_eval_metrics',
help='Should we compute the mscoco MT metrics?',
default=0,
type=int)
parser.add_argument('--compute_metrics_train',
help='Should we also compute metrics over the training set?',
default=1,
type=int)
parser.add_argument('--lr_warmup_steps',
help='If positive value, we will warmup the learning rate linearly '
'over this many steps.',
default=-1,
type=int)
parser.add_argument('--l2_norm',
help='If 1, we will l2 normalize extracted features, else, no normalization.',
default=1,
type=int)
parser.add_argument('--n_layers',
help='How many layers in the encoders?',
default=1,
type=int,
choices=[1,2,3])
parser.add_argument('--scale_image_features',
help='Should we standard scale image features?',
default=0,
type=int)
args = parser.parse_args()
# check to make sure that various flags are set correctly
if args.end2end:
assert args.image_dir is not None
if not args.end2end:
assert args.image_features is not None and args.image_id2row is not None
# print out some info about the run's inputs/outputs
if args.output and '.pkl' not in args.output:
args.output += '.pkl'
if args.output:
print('Output will be saved to {}'.format(args.output))
print('Model checkpoints will be saved in {}'.format(args.checkpoint_dir))
if args.output and os.path.exists(args.output) and not args.force:
print('{} already done! If you want to force it, set --force 1'.format(args.output))
quit()
if not os.path.exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
if args.save_predictions:
if not os.path.exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
os.makedirs(args.checkpoint_dir + '/train')
os.makedirs(args.checkpoint_dir + '/val')
os.makedirs(args.checkpoint_dir + '/test')
return args
def main():
args = parse_args()
np.random.seed(args.seed)
data = load_data(args.documents)
train, val, test = data['train'], data['val'], data['test']
np.random.shuffle(train); np.random.shuffle(val); np.random.shuffle(test)
max_n_sentence, max_n_image = -1, -1
for d in train + val + test:
imgs, sents, meta = d
max_n_sentence = max(max_n_sentence, len(sents))
max_n_image = max(max_n_image, len(imgs))
# remove zero image/zero sentence cases:
before_lens = list(map(len, [train, val, test]))
train = [t for t in train if len(t[0]) > 0 and len(t[1]) > 0]
val = [t for t in val if len(t[0]) > 0 and len(t[1]) > 0]
test = [t for t in test if len(t[0]) > 0 and len(t[1]) > 0]
after_lens = list(map(len, [train, val, test]))
for bl, al, split in zip(before_lens, after_lens, ['train', 'val', 'test']):
if bl == al: continue
print('Removed {} documents from {} split that had zero images and/or sentences'.format(
bl-al, split))
print('Max n sentence={}, max n image={}'.format(max_n_sentence, max_n_image))
if args.cached_vocab:
print('Saving/loading vocab from {}'.format(args.cached_vocab))
# create vocab from training documents:
flattened_train_sents = []
for _, sents, _ in train:
flattened_train_sents.extend([s[0] for s in sents])
word2idx = text_utils.get_vocab(flattened_train_sents, cached=args.cached_vocab)
print('Vocab size was {}'.format(len(word2idx)))
if args.word2vec_binary:
we_init = text_utils.get_word2vec_matrix(
word2idx, args.cached_word_embeddings, args.word2vec_binary)
else:
we_init = np.random.uniform(low=-.02, high=.02, size=(len(word2idx), 300))
if args.end2end:
image_features = None
image_idx2row = None
else:
image_features = np.load(args.image_features)
image_idx2row = load_data(args.image_id2row)
if args.scale_image_features:
ss = sklearn.preprocessing.StandardScaler()
all_train_images = []
for img, txt, cid in train:
all_train_images.extend([x[0] for x in img])
print('standard scaling with {} images total'.format(len(all_train_images)))
all_train_rows = [image_idx2row[cid] for cid in all_train_images]
ss.fit(image_features[np.array(all_train_rows)])
image_features = ss.transform(image_features)
word_emb_dim = 300
if val[0][0][0][1] is not None:
ground_truth = True
print('The input has ground truth, so AUC will be computed.')
else:
ground_truth = False
# Step 1: Specify model inputs/outputs:
# (n docs, n sent, max n words,)
text_inp = tf.keras.layers.Input((None, args.seq_len))
# this input tells you how many sentences are really in each doc
text_n_inp = tf.keras.layers.Input((1,), dtype='int32')
if args.end2end:
# (n docs, n image, x, y, color)
img_inp = tf.keras.layers.Input((None, 224, 224, 3))
else:
# (n docs, n image, feature dim)
img_inp = tf.keras.layers.Input((None, image_features.shape[1]))
# this input tells you how many images are really in each doc
img_n_inp = tf.keras.layers.Input((1,), dtype='int32')
# Step 2: Define transformations to shared multimodal space.
# Step 2.1: The text model:
if args.text_model_checkpoint:
print('Loading pretrained text model from {}'.format(
args.text_model_checkpoint))
single_text_doc_model = tf.keras.models.load_model(args.text_model_checkpoint)
extracted_text_features = single_text_doc_model(text_inp)
else:
word_embedding = tf.keras.layers.Embedding(
len(word2idx),
word_emb_dim,
weights=[we_init] if we_init is not None else None,
mask_zero=True)
element_dropout = tf.keras.layers.SpatialDropout1D(args.dropout)
if args.rnn_type == 'GRU':
rnn_maker = tf.keras.layers.GRU
else:
rnn_maker = tf.keras.layers.LSTM
enc_layers = []
for idx in range(args.n_layers):
if idx == args.n_layers-1:
enc_layers.append(rnn_maker(args.joint_emb_dim))
else:
enc_layers.append(rnn_maker(args.joint_emb_dim, return_sequences=True))
embedded_text_inp = word_embedding(text_inp)
extracted_text_features = tf.keras.layers.TimeDistributed(element_dropout)(embedded_text_inp)
for l in enc_layers:
extracted_text_features = tf.keras.layers.TimeDistributed(l)(extracted_text_features)
# extracted_text_features is now (n docs, max n sentences, multimodal dim)
if args.l2_norm:
l2_norm_layer = tf.keras.layers.Lambda(lambda x: tf.nn.l2_normalize(x, axis=-1))
extracted_text_features = l2_norm_layer(extracted_text_features)
single_text_doc_model = tf.keras.models.Model(
inputs=text_inp,
outputs=extracted_text_features)
# Step 2.2: The image model:
if args.image_model_checkpoint:
print('Loading pretrained image model from {}'.format(
args.image_model_checkpoint))
single_img_doc_model = tf.keras.models.load_model(args.image_model_checkpoint)
extracted_img_features = single_img_doc_model(img_inp)
else:
if args.end2end:
img_projection = tf.keras.layers.Dense(args.joint_emb_dim)
from tensorflow.keras.applications.nasnet import NASNetMobile
cnn = tf.keras.applications.nasnet.NASNetMobile(
include_top=False, input_shape=(224, 224, 3), pooling='avg')
extracted_img_features = tf.keras.layers.TimeDistributed(cnn)(img_inp)
if args.dropout > 0.0:
extracted_img_features = tf.keras.layers.TimeDistributed(
tf.keras.layers.Dropout(args.dropout))(extracted_img_features)
extracted_img_features = tf.keras.layers.TimeDistributed(img_projection)(
extracted_img_features)
else:
extracted_img_features = tf.keras.layers.Masking()(img_inp)
if args.dropout > 0.0:
extracted_img_features = tf.keras.layers.TimeDistributed(
tf.keras.layers.Dropout(args.dropout))(extracted_img_features)
enc_layers = []
for idx in range(args.n_layers):
if idx == args.n_layers-1:
enc_layers.append(tf.keras.layers.Dense(args.joint_emb_dim))
else:
enc_layers.append(tf.keras.layers.Dense(args.joint_emb_dim, activation='relu'))
enc_layers.append(tf.keras.layers.BatchNormalization())
for l in enc_layers:
extracted_img_features = tf.keras.layers.TimeDistributed(l)(extracted_img_features)
# extracted_img_features is now (n docs, max n images, multimodal dim)
if args.l2_norm:
l2_norm_layer = tf.keras.layers.Lambda(lambda x: tf.nn.l2_normalize(x, axis=-1))
extracted_img_features = l2_norm_layer(extracted_img_features)
single_img_doc_model = tf.keras.models.Model(
inputs=img_inp,
outputs=extracted_img_features)
# Step 3: Extract/stack the non-padding image/sentence representations
def mask_slice_and_stack(inp):
stacker = []
features, n_inputs = inp
n_inputs = tf.dtypes.cast(n_inputs, tf.int32)
# for each document, we will extract the portion of input features that are not padding
# this means, for features[doc_idx], we will take the first n_inputs[doc_idx] rows.
# we stack them into one big array so we can do a big cosine sim dot product between all
# sentence image pairs in parallel. We'll slice up this array back up later.
for idx in range(args.docs_per_batch):
cur_valid_idxs = tf.range(n_inputs[idx,0])
cur_valid_features = features[idx]
feats = tf.gather(cur_valid_features, cur_valid_idxs)
stacker.append(feats)
return tf.concat(stacker, axis=0)
# extracted text/img features are (n_docs, max_in_seq, dim)
# we want to compute cosine sims between all (sent, img) pairs quickly
# so we will stack them into new tensors ...
# text_enc has shape (total number of sent in batch, dim)
# img_enc has shape (total number of image in batch, dim)
text_enc = mask_slice_and_stack([extracted_text_features, text_n_inp])
img_enc = mask_slice_and_stack([extracted_img_features, img_n_inp])
def DC_sim(sim_matrix):
text2im_S = tf.reduce_mean(tf.reduce_max(sim_matrix, 1))
im2text_S = tf.reduce_mean(tf.reduce_max(sim_matrix, 0))
return text2im_S + im2text_S
def get_k(sim_matrix):
k = tf.minimum(tf.shape(sim_matrix)[0], tf.shape(sim_matrix)[1])
if args.sim_mode_k > 0:
k = tf.dtypes.cast(k, tf.float32)
k = tf.math.ceil(tf.math.divide(k, args.sim_mode_k))
k = tf.dtypes.cast(k, tf.int32)
return k
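# Worked example (hypothetical sizes): with 5 sentences, 8 images and
# --sim_mode_k 2, get_k returns ceil(min(5, 8) / 2) = 3, i.e. the TK/AP
# similarity functions below consider the 3 best-matching pairs.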
def TK_sim(sim_matrix):
k = get_k(sim_matrix)
im2text_S, text2im_S = tf.reduce_max(sim_matrix, 0), tf.reduce_max(sim_matrix, 1)
text2im_S = tf.reduce_mean(tf.math.top_k(text2im_S, k=k)[0], axis=-1)
im2text_S = tf.reduce_mean(tf.math.top_k(im2text_S, k=k)[0], axis=-1)
return text2im_S + im2text_S
bipartite_match_fn = bipartite_utils.generate_fast_hungarian_solving_function()
def AP_sim(sim_matrix):
k = get_k(sim_matrix)
sol = tf.numpy_function(bipartite_match_fn, [sim_matrix, k], tf.int32)
return tf.reduce_mean(tf.gather_nd(sim_matrix, sol))
if args.sim_mode == 'DC':
sim_fn = DC_sim
elif args.sim_mode == 'TK':
sim_fn = TK_sim
elif args.sim_mode == 'AP':
sim_fn = AP_sim
else:
raise NotImplementedError('{} is not an implemented sim function'.format(args.sim_mode))
def make_sims(inp):
sims = tf.keras.backend.dot(inp[0], tf.keras.backend.transpose(inp[1]))
return sims
all_sims = make_sims([text_enc, img_enc])
get_pos_neg_sims = model_utils.make_get_pos_neg_sims(
args,
sim_fn)
pos_sims, neg_img_sims, neg_text_sims = tf.keras.layers.Lambda(
get_pos_neg_sims)([all_sims, text_n_inp, img_n_inp])
if args.loss_mode == 'hinge':
def per_neg_loss(inp):
pos_s, neg_s = inp
return tf.math.maximum(neg_s - pos_s + args.margin, 0)
elif args.loss_mode == 'logistic':
def per_neg_loss(inp):
pos_s, neg_s = inp
return tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.ones_like(neg_s),
logits=pos_s - neg_s)
elif args.loss_mode == 'softmax':
def per_neg_loss(inp):
pos_s, neg_s = inp
pos_s -= args.margin
pos_l, neg_l = tf.ones_like(pos_s), tf.zeros_like(neg_s)
return tf.nn.softmax_cross_entropy_with_logits(
tf.concat([pos_l, neg_l], axis=1),
tf.concat([pos_s, neg_s], axis=1))
neg_img_losses = per_neg_loss([pos_sims, neg_img_sims])
neg_text_losses = per_neg_loss([pos_sims, neg_text_sims])
if args.loss_mode != 'softmax':
if args.neg_mining == 'negative_sample':
pool_fn = lambda x: tf.reduce_mean(x, axis=1, keepdims=True)
elif args.neg_mining == 'hard_negative':
pool_fn = lambda x: tf.reduce_max(x, axis=1, keepdims=True)
else:
raise NotImplementedError('{} is not a valid for args.neg_mining'.format(
args.neg_mining))
neg_img_loss = tf.keras.layers.Lambda(pool_fn, name='neg_img')(neg_img_losses)
neg_text_loss = tf.keras.layers.Lambda(pool_fn, name='neg_text')(neg_text_losses)
else:
neg_img_loss = neg_img_losses
neg_text_loss = neg_text_losses
inputs = [text_inp,
img_inp,
text_n_inp,
img_n_inp]
model = tf.keras.models.Model(inputs=inputs,
outputs=[neg_img_loss, neg_text_loss])
opt = tf.keras.optimizers.Adam(args.lr)
def identity(y_true, y_pred):
del y_true
return tf.reduce_mean(y_pred, axis=-1)
model.compile(opt, loss=identity)
if args.test_eval > 0:
train = train[:args.test_eval * args.docs_per_batch]
val = val[:args.test_eval * args.docs_per_batch]
test = test[:args.test_eval * args.docs_per_batch]
train_seq = training_utils.DocumentSequence(
train,
image_features,
image_idx2row,
max_n_sentence,
max_n_image,
word2idx,
args=args,
shuffle_docs=True,
shuffle_sentences=False,
shuffle_images=True)
val_seq = training_utils.DocumentSequence(
val,
image_features,
image_idx2row,
max_n_sentence,
max_n_image,
word2idx,
args=args,
augment=False,
shuffle_sentences=False,
shuffle_docs=False,
shuffle_images=False)
sdm = training_utils.SaveDocModels(
args.checkpoint_dir,
single_text_doc_model,
single_img_doc_model)
if args.loss_mode == 'hinge':
val_loss_thresh = 2 * args.margin # constant prediction performance
else:
val_loss_thresh = np.inf
reduce_lr = training_utils.ReduceLROnPlateauAfterValLoss(
activation_val_loss=val_loss_thresh,
factor=args.lr_decay,
patience=args.lr_patience,
min_lr=args.min_lr,
verbose=True)
callbacks = [reduce_lr, sdm]
if args.print_metrics:
metrics_printer = training_utils.PrintMetrics(
val,
image_features,
image_idx2row,
word2idx,
single_text_doc_model,
single_img_doc_model,
args)
callbacks.append(metrics_printer)
if args.lr_warmup_steps > 0:
warmup_lr = training_utils.LearningRateLinearIncrease(
args.lr,
args.lr_warmup_steps)
callbacks.append(warmup_lr)
history = model.fit(
train_seq,
epochs=args.n_epochs,
validation_data=val_seq,
callbacks=callbacks)
if args.output:
best_image_model_str, best_sentence_model_str, best_logs, best_epoch = sdm.best_checkpoints_and_logs
single_text_doc_model = tf.keras.models.load_model(best_sentence_model_str)
single_image_doc_model = tf.keras.models.load_model(best_image_model_str)
if args.scale_image_features:
with open(args.checkpoint_dir + '/image_standardscaler.pkl', 'wb') as f:
pickle.dump(ss, f)
if ground_truth and args.compute_metrics_train:
train_aucs, train_match_metrics, train_mt_metrics = eval_utils.compute_match_metrics_doc(
train,
image_features,
image_idx2row,
word2idx,
single_text_doc_model,
single_img_doc_model,
args)
else:
train_aucs, train_match_metrics, train_mt_metrics = None, None, None
if ground_truth:
val_aucs, val_match_metrics, val_mt_metrics = eval_utils.compute_match_metrics_doc(
val,
image_features,
image_idx2row,
word2idx,
single_text_doc_model,
single_img_doc_model,
args)
test_aucs, test_match_metrics, test_mt_metrics = eval_utils.compute_match_metrics_doc(
test,
image_features,
image_idx2row,
word2idx,
single_text_doc_model,
single_img_doc_model,
args)
else:
train_aucs, val_aucs, test_aucs = None, None, None
train_match_metrics, val_match_metrics, test_match_metrics = None, None, None
train_mt_metrics, val_mt_metrics, test_mt_metrics = None, None, None
output = {'logs':best_logs,
'best_sentence_model_str':best_sentence_model_str,
'best_image_model_str':best_image_model_str,
'train_aucs':train_aucs,
'train_match_metrics':train_match_metrics,
'train_mt_metrics':train_mt_metrics,
'val_aucs':val_aucs,
'val_match_metrics':val_match_metrics,
'val_mt_metrics':val_mt_metrics,
'test_aucs':test_aucs,
'test_match_metrics':test_match_metrics,
'test_mt_metrics':test_mt_metrics,
'args':args,
'epoch':best_epoch}
if args.scale_image_features:
output['image_standard_scaler_str'] = args.checkpoint_dir + '/image_standardscaler.pkl'
for k, v in history.history.items():
output['history_{}'.format(k)] = v
if args.print_metrics:
for k, v in metrics_printer.history.items():
output['metrics_history_{}'.format(k)] = v
with open(args.output, 'wb') as f:
pickle.dump(output, f, protocol=pickle.HIGHEST_PROTOCOL)
print('saved output to {}'.format(args.output))
if args.save_predictions:
for d, name in zip([train, val, test], ['train', 'val', 'test']):
out_dir = args.save_predictions + '/' + name
if not os.path.exists(out_dir):
os.makedirs(out_dir)
eval_utils.save_predictions(
d,
image_features,
image_idx2row,
word2idx,
single_text_doc_model,
single_img_doc_model,
out_dir,
args)
if __name__ == '__main__':
main()
``` |
{
"source": "jmhessel/recursive_nn_tf2",
"score": 3
} |
#### File: jmhessel/recursive_nn_tf2/recursive_nn.py
```python
import argparse
import tensorflow as tf
import numpy as np
class SimpleTreeLayer(tf.keras.layers.Layer):
def __init__(self, dim, just_root_output=True, *args, **kwargs):
super(SimpleTreeLayer, self).__init__(*args, **kwargs)
self.dim = dim
self.just_root_output = just_root_output
self.supports_masking = False
def build(self, input_shape):
self.inner_transform = tf.keras.layers.Dense(self.dim, activation='sigmoid')
self.input_transform = tf.keras.layers.Dense(self.dim, activation='sigmoid')
def compute_output_shape(self, input_shape):
if len(input_shape) == 2:
if self.just_root_output:
return (input_shape[0][0], self.dim)
else:
return (input_shape[0][0], input_shape[0][1], self.dim)
else:
if self.just_root_output:
return (input_shape[0], self.dim)
else:
return (input_shape[0], input_shape[1], self.dim)
def _combine_inner(self, reprs, features):
'''Combination function:
- reprs a list of dim lengthed vectors from child nodes
- features is a dim-lengthed input feature vector for this node.
a few conventions:
- if reprs is an empty list, you're at a leaf node, and
should proceed appropriately
- if features contains any nans for this node, it will be
ignored. this can be useful if some, but not all, nodes
have inputs.
- if features is None, then no features were handed to the
layer, and it should be ignored.
This returns the output state of the node; it should include both the output
info and any info required for computation at higher nodes.
'''
if not (features is None):
valid_features = tf.reduce_all(tf.logical_not(tf.math.is_nan(features)))
if len(reprs) == 0: # base case
if features is None: # leaf node and no features
features = tf.zeros(self.dim)
valid_features = True
if valid_features:
return self.input_transform(tf.expand_dims(features, 0))[0]
else:
raise NotImplementedError(
'Leaf nodes should either have no features or valid features')
if not (features is None):
if valid_features:
reprs += [features]
reprs = tf.stack(reprs, axis=0)
c_mean = tf.reduce_mean(reprs, axis=0, keepdims=True)
c_max = tf.reduce_max(reprs, axis=0, keepdims=True)
c_min = tf.reduce_min(reprs, axis=0, keepdims=True)
trans = self.inner_transform(tf.concat([c_mean, c_max, c_min], axis=1))
return trans[0]
def _encode_tree(self, tree_enc, node_features):
state = [None for _ in range(tree_enc.shape[0])]
def _encode_tree_rec(cur_idx):
ch_start, ch_end = tree_enc[cur_idx][0], tree_enc[cur_idx][1]
if ch_start == -1:
if node_features is None:
state[cur_idx] = self._combine_inner([], node_features)
else:
state[cur_idx] = self._combine_inner([], node_features[cur_idx])
else:
for child_idx in range(ch_start, ch_end):
_encode_tree_rec(child_idx)
if node_features is None:
state[cur_idx] = self._combine_inner(
state[ch_start: ch_end], node_features)
else:
state[cur_idx] = self._combine_inner(
state[ch_start: ch_end], node_features[cur_idx])
_encode_tree_rec(0)
if self.just_root_output:
return state[0]
else:
# turn the Nones in state, i.e., the padding nodes, into zeros
state = [s if not (s is None) else tf.zeros(self.dim) for s in state]
return tf.stack(state, axis=0)
def call(self, inputs):
if isinstance(inputs, list):
assert len(inputs) == 2, 'Inputs must be either [tree, features] or just tree'
structure, features = inputs
else:
structure, features = inputs, [None for _ in range(inputs.shape[0])]
outputs = []
for b_idx in range(structure.shape[0]):
outputs.append(self._encode_tree(structure[b_idx], features[b_idx]))
return tf.stack(outputs, axis=0)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--dim',
type=int,
default=10)
parser.add_argument(
'--seed',
type=int,
default=1)
return parser.parse_args()
def main():
'''
Main to show off some features...
'''
args = parse_args()
np.random.seed(args.seed)
tf.random.set_seed(args.seed)
# trees are represented by tree = (batch, max_nodes, 2) shaped tensor
# where tree[i, j] gives the start (inclusive) and end (exclusive) indices
# of the children.
# this implementation also supports arbitrary features passed for each internal
# node.
test_tree_structure = [[1, 4], # root node has 3 children
[-1,-1], # first child has no children
[-1,-1], # second child has no children
[4, 6], # third child has 2 children
[-1, -1], # no children for first child of third
[-1, -1], # no children for second child of third
[-1, -1]] # just a dummy padding node
# order of children doesnt matter to simple tree layer
idx_equiv = np.array([0, 3, 2, 1, 4, 5, 6])
test_tree_structure_equiv = np.array(test_tree_structure)[idx_equiv]
# add a batch dim
test_tree_structure = np.expand_dims(test_tree_structure, 0)
test_tree_structure_equiv = np.expand_dims(test_tree_structure_equiv, 0)
simple = SimpleTreeLayer(args.dim, dynamic=True)
# example without node features. The leaf nodes are assumed to have zero
# features, and combination happens from there.
res_no_features = simple(test_tree_structure)
res_no_features_equiv = simple(test_tree_structure_equiv)
np.testing.assert_allclose(res_no_features, res_no_features_equiv)
# equivalent example --- internal node features are ignored by setting
# to nan, leaf nodes are zero
test_tree_zero_features = np.zeros((7, args.dim)).astype(np.float32)
inner_idxs = np.array([0, 3])
test_tree_zero_features[inner_idxs,:] = np.nan
test_tree_zero_features = np.expand_dims(test_tree_zero_features, 0)
test_tree_zero_features_equiv = test_tree_zero_features[:, idx_equiv, :]
res_no_features_2 = simple([test_tree_structure, test_tree_zero_features])
res_no_features_2_equiv = simple([test_tree_structure_equiv, test_tree_zero_features_equiv])
np.testing.assert_allclose(res_no_features, res_no_features_2)
np.testing.assert_allclose(res_no_features, res_no_features_2_equiv)
# of course, you can also hand arbitrary input features for each node
features = np.random.random((1, 7, args.dim)).astype(np.float32)
features_equiv = features[:, idx_equiv, :]
res_features = simple([test_tree_structure, features])
res_features_equiv = simple([test_tree_structure_equiv, features_equiv])
np.testing.assert_allclose(res_features, res_features_equiv)
# and you can ignore any input features, if your tree layer supports it
# ignore root node
features[0, 0] = np.nan
res_features_ignore_root = simple([test_tree_structure, features])
# and you can return the state of all of the nodes, too...
simple_all_nodes = SimpleTreeLayer(args.dim, just_root_output=False, dynamic=True)
res_features_ignore_root_all = simple_all_nodes([test_tree_structure, features])
# padding nodes will be assigned zero
print(res_features_ignore_root_all)
# you can use layers in models too
tree_input = tf.keras.layers.Input((None, 2), dtype='int32')
tree_features_input = tf.keras.layers.Input((None, 100), dtype='float32')
model_layer = SimpleTreeLayer(args.dim, dynamic=True, just_root_output=False)
res = model_layer([tree_input, tree_features_input])
model = tf.keras.models.Model(inputs=[tree_input, tree_features_input],
outputs=res)
model.summary()
if __name__ == '__main__':
main()
``` |
{
"source": "jmhIcoding/flowcontainer",
"score": 3
} |
#### File: flowcontainer/flowcontainer/flows.py
```python
from datetime import datetime
import ipaddress
################################################################################
# Single Flow object #
################################################################################
class Flow(object):
"""Flow object extracted from pcap file that can be used for fingerprinting
Attributes
----------
src : string
Source IP
sport : int
Source port
dst : string
Destination IP
dport : int
Destination port
source : tuple
(Source IP, source port) tuple
destination : tuple
(Destination IP, destination port) tuple
time_start : int
Timestamp of first packet in flow
time_end : int
Timestamp of last packet in flow
ip_lengths : list
List of packet lengths, one for each ip packet in the flow
payload_lengths : list
List of payload lengths, one for each tcp/udp fragment with non-zero payload in the flow.
ip_timestamps : list
List of timestamps corresponding to each ip packet in the flow; it may include packets without any tcp/udp payload.
payload_timestamps: list
List of timestamps corresponding to each tcp/udp fragment with non-zero payload in the flow.
extension : dict
Dict of extensions, where the keys are the items passed to the flowcontainer.extractor.extract function.
The values `extension[key]` are lists of tuples, where each tuple is (value, packet_id).
"""
def __init__(self,main='payload'):
"""
Parameters
-----------
main: str
'payload' means the main lengths sequence and timestamps sequence refer to packets with non-zero payload; the sequences filter out zero-payload packets.
'ip' means the main lengths sequence and timestamps sequence refer to all packets; no packets are filtered out.
"""
self.main = main
"""Initialise an empty Flow."""
# Initialise flow endpoints
self.src = None
self.sport = None
self.dst = None
self.dport = None
# Initialise extension
self.extension = dict()
# Initialise packet lengths
self.ip_lengths = list()
self.payload_lengths = list()
# Initialise packet timestamps
self.ip_timestamps = list()
self.payload_timestamps = list() #non-zero payload packet's timestamp sequence.
# Refer the main property
self.lengths = self.payload_lengths if main=='payload' else self.ip_lengths
self.timestamps = self.payload_timestamps if main=='payload' else self.ip_timestamps
########################################################################
# Add new packet to flow #
########################################################################
def add(self, packet,extension):
"""Add a new packet to the flow.
Parameters
----------
packet : np.array of shape=(n_features,)
Packet from Reader.
Returns
-------
self : self
Returns self
"""
#print(packet)
try:
# Extract IPs from packet
ip_a, ip_b = packet[5], packet[6]
except BaseException as exp:
raise ValueError('Parse IP address error: this is not an IP packet! Please pass the filter parameter with `(tcp or udp)` when invoking flowcontainer.extractor.extract()!')
try:
# Extract ports from packet
port_a, port_b = int(packet[7]), int(packet[8])
except BaseException as exp:
raise ValueError('Parse TCP/UDP port error: this IP packet may not be a TCP, UDP, or GRE sample. Please pass the filter parameter with `(tcp or udp)` when invoking flowcontainer.extractor.extract()!')
# Perform packet check
if self.src is not None:
if {self.src, self.dst} != {ip_a, ip_b} and {self.sport, self.dport} != {port_a, port_b}:
raise ValueError("Packet {} incompatible with flow {}" .format(packet, self))
# Set endpoints where smallest dport is destination
elif port_a > port_b:
self.src , self.dst = ip_a , ip_b
self.sport, self.dport = port_a, port_b
else:
self.src , self.dst = ip_b , ip_a
self.sport, self.dport = port_b, port_a
# Add extension if any
for i in range(len(packet[-1])):
if packet[-1][i] != "":
if extension[i] not in self.extension:
self.extension.setdefault(extension[i],[])
self.extension[extension[i]].append((packet[-1][i],len(self.ip_lengths)))
# Set timestamps and lengths
#print(packet)
self.ip_timestamps.append(float(packet[3]))
self.ip_lengths .append( int(packet[4]) if packet[5] == self.src else
-int(packet[4]))
if int(packet[9]) != 0:
try:
self.payload_lengths.append( int(packet[9]) if packet[5] == self.src else
-int(packet[9]))
self.payload_timestamps.append(float(packet[3]))
except BaseException as exp:
raise ValueError('Parse payload length and timestamp error: this IP packet may not be a TCP or UDP sample. Please pass the filter parameter with `(tcp or udp)` when invoking flowcontainer.extractor.extract()!')
# Return self
return self
########################################################################
# Source/Destination/Time attributes #
########################################################################
@property
def source(self):
"""(source IP, source port)-tuple of Flow"""
return (self.src, self.sport)
@property
def destination(self):
"""(destination IP, destination port)-tuple of Flow"""
return (self.dst, self.dport)
@property
def time_start(self):
"""Returns start time of Flow"""
return min(self.timestamps)
@property
def time_end(self):
"""Returns end time of Flow"""
return max(self.timestamps)
########################################################################
# Class overrides #
########################################################################
def __len__(self):
"""Return length of Flow in packets."""
return len(self.lengths)
def __str__(self):
"""Return string representation of flow."""
if self.main=='ip':
return "[Time {} to {}] {:>15}:{:<5} <-> {:>15}:{:<5} [IP Packet Size Length {}] [extension: {}]".format(
datetime.fromtimestamp(min(self.timestamps)).strftime("%H:%M:%S.%f"),
datetime.fromtimestamp(max(self.timestamps)).strftime("%H:%M:%S.%f"),
self.src, self.sport, self.dst, self.dport,
len(self),self.extension)
else:
return "[Time {} to {}] {:>15}:{:<5} <-> {:>15}:{:<5} [Payload Packet Size Length {}] [extension: {}]".format(
datetime.fromtimestamp(min(self.timestamps)).strftime("%H:%M:%S.%f"),
datetime.fromtimestamp(max(self.timestamps)).strftime("%H:%M:%S.%f"),
self.src, self.sport, self.dst, self.dport,
len(self),self.extension)
def __gt__(self, other):
"""Greater than object override"""
return min(self.timestamps) > min(other.timestamps)
def __ge__(self, other):
"""Greater equals object override"""
return min(self.timestamps) >= min(other.timestamps)
def __lt__(self, other):
"""Less than object override"""
return min(self.timestamps) < min(other.timestamps)
def __le__(self, other):
"""Less equals object override"""
return min(self.timestamps) <= min(other.timestamps)
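########################################################################
#            Hedged usage sketch (added, not original code)           #
########################################################################
# A minimal illustration of how a Flow might be populated, assuming the
# packet layout documented in add(): packet[3] timestamp, packet[4] ip
# length, packet[5]/packet[6] source/destination ip, packet[7]/packet[8]
# ports, packet[9] payload length, packet[-1] extension values. The
# packet below is fabricated purely for illustration.
#
#   flow = Flow(main='payload')
#   fake_packet = [None, None, None, 1.0, 60, '10.0.0.1', '10.0.0.2',
#                  443, 51000, 20, ['']]
#   flow.add(fake_packet, extension=[])
#   print(flow)       # human-readable summary via __str__
#   print(len(flow))  # number of non-zero-payload packets in the flow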
``` |
{
"source": "jmhobbs/dun-dun-duh",
"score": 2
} |
#### File: dun-dun-duh/dundunduh/__init__.py
```python
import os
import socket
from flask import Flask, url_for
from .config import BaseConfig
from .views import register_views
from .extensions import rq, redis
app = Flask(__name__)
##############################
# Load Config
app.config['UPLOAD_FOLDER'] = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'uploads')
app.config.from_object(BaseConfig)
app.config.from_envvar('CONFIG', silent=True)
app.debug = app.config.get('DEBUG', False)
##############################
# Attach Views
register_views(app)
##############################
# Init Extensions
rq.init_app(app)
redis.init_app(app)
##############################
# Configure Templating
@app.template_filter()
def url_for_gif(slug):
filename = slug + ".gif"
if app.config.get('UPLOAD_URL_FORMAT_STRING'):
return app.config.get('UPLOAD_URL_FORMAT_STRING') % {"filename": filename}
else:
return url_for('uploaded_file', filename=filename, _external=True)
@app.template_filter()
def url_for_still(slug):
filename = slug + ".jpg"
if app.config.get('UPLOAD_URL_FORMAT_STRING'):
return app.config.get('UPLOAD_URL_FORMAT_STRING') % {"filename": filename}
else:
return url_for('uploaded_file', filename=filename, _external=True)
@app.context_processor
def inject_globals():
return dict(
g_ENVIRONMENT=app.config.get('ENV'),
g_HOSTNAME=socket.gethostname(),
g_IS_PRODUCTION=('PRODUCTION' == app.config.get('ENV')),
g_SERVER_NAME=app.config.get('SERVER_NAME'),
g_GOOGLE_ANALYTICS_ID=app.config.get('GOOGLE_ANALYTICS_ID')
)
```
#### File: dun-dun-duh/dundunduh/records.py
```python
import json
from datetime import datetime
from pytz import timezone
from flask import current_app
from .extensions import redis
rolling_average_lua = """
local i = redis.call('GET', KEYS[1])
local cai = redis.call('GET', KEYS[2])
i = tonumber(i)
cai = tonumber(cai)
if i == nil then
i = 0
end
if cai == nil then
cai = 0
end
cai = (tonumber(ARGV[1]) + (i * cai)) / (i + 1)
redis.call('SET', KEYS[2], cai)"""
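# Added note: the Lua script above implements an incremental (running) mean.
# KEYS[1] is read as the sample count i, KEYS[2] as the current average cai,
# and ARGV[1] is the new sample; the update performed is
#   cai <- (ARGV[1] + i * cai) / (i + 1)
# which is the standard running-average formula, stored back into KEYS[2].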
def create_gif(slug, ip, queue_time, start_rendering, wait_duration, render_duration, store_duration, total_queue_duration):
tz = timezone(current_app.config.get('TIMEZONE', 'UTC'))
dt = datetime.fromtimestamp(queue_time)
dt = tz.localize(dt)
record = json.dumps({
"slug": slug,
"ip": ip,
"timestamp": queue_time,
"durations": {
"wait": wait_duration,
"render": render_duration,
"store": store_duration,
"total": total_queue_duration
}
})
rolling_average_script = redis.connection.register_script(rolling_average_lua)
pipe = redis.pipeline()
pipe.lpush('gifs', record)
pipe.incr('stats:created')
pipe.incr('stats:created:%d-%02d-%02d' % (dt.year, dt.month, dt.day))
pipe.incr('stats:created:%d-%02d-%02d %02d' % (dt.year, dt.month, dt.day, dt.hour))
pipe.incr('stats:created:%d-%02d-%02d %02d:%02d' % (dt.year, dt.month, dt.day, dt.hour, (dt.minute / 5) * 5))
rolling_average_script(keys=['stats:created', 'stats:average:total'], args=[total_queue_duration], client=pipe)
rolling_average_script(keys=['stats:created', 'stats:average:wait'], args=[wait_duration], client=pipe)
rolling_average_script(keys=['stats:created', 'stats:average:render'], args=[render_duration], client=pipe)
rolling_average_script(keys=['stats:created', 'stats:average:store'], args=[store_duration], client=pipe)
rolling_average_script(keys=['stats:created', 'stats:average:total:%d-%02d-%02d' % (dt.year, dt.month, dt.day)], args=[total_queue_duration], client=pipe)
rolling_average_script(keys=['stats:created', 'stats:average:wait:%d-%02d-%02d' % (dt.year, dt.month, dt.day)], args=[wait_duration], client=pipe)
rolling_average_script(keys=['stats:created', 'stats:average:render:%d-%02d-%02d' % (dt.year, dt.month, dt.day)], args=[render_duration], client=pipe)
rolling_average_script(keys=['stats:created', 'stats:average:store:%d-%02d-%02d' % (dt.year, dt.month, dt.day)], args=[store_duration], client=pipe)
rolling_average_script(keys=['stats:created', 'stats:average:total:%d-%02d-%02d %02d' % (dt.year, dt.month, dt.day, dt.hour)], args=[total_queue_duration], client=pipe)
rolling_average_script(keys=['stats:created', 'stats:average:wait:%d-%02d-%02d %02d' % (dt.year, dt.month, dt.day, dt.hour)], args=[wait_duration], client=pipe)
rolling_average_script(keys=['stats:created', 'stats:average:render:%d-%02d-%02d %02d' % (dt.year, dt.month, dt.day, dt.hour)], args=[render_duration], client=pipe)
rolling_average_script(keys=['stats:created', 'stats:average:store:%d-%02d-%02d %02d' % (dt.year, dt.month, dt.day, dt.hour)], args=[store_duration], client=pipe)
rolling_average_script(keys=['stats:created', 'stats:average:total:%d-%02d-%02d %02d:%02d' % (dt.year, dt.month, dt.day, dt.hour, (dt.minute / 5) * 5)], args=[total_queue_duration], client=pipe)
rolling_average_script(keys=['stats:created', 'stats:average:wait:%d-%02d-%02d %02d:%02d' % (dt.year, dt.month, dt.day, dt.hour, (dt.minute / 5) * 5)], args=[wait_duration], client=pipe)
rolling_average_script(keys=['stats:created', 'stats:average:render:%d-%02d-%02d %02d:%02d' % (dt.year, dt.month, dt.day, dt.hour, (dt.minute / 5) * 5)], args=[render_duration], client=pipe)
rolling_average_script(keys=['stats:created', 'stats:average:store:%d-%02d-%02d %02d:%02d' % (dt.year, dt.month, dt.day, dt.hour, (dt.minute / 5) * 5)], args=[store_duration], client=pipe)
pipe.execute()
def create_gif_failed(queue_time):
tz = timezone(current_app.config.get('TIMEZONE', 'UTC'))
dt = datetime.fromtimestamp(queue_time)
dt = tz.localize(dt)
pipe = redis.pipeline()
pipe.incr('stats:failed')
pipe.incr('stats:failed:%d-%02d-%02d' % (dt.year, dt.month, dt.day))
pipe.incr('stats:failed:%d-%02d-%02d %02d' % (dt.year, dt.month, dt.day, dt.hour))
pipe.incr('stats:failed:%d-%02d-%02d %02d:%02d' % (dt.year, dt.month, dt.day, dt.hour, (dt.minute / 5) * 5))
pipe.execute()
def create_gif_cancelled(queue_time):
tz = timezone(current_app.config.get('TIMEZONE', 'UTC'))
dt = datetime.fromtimestamp(queue_time)
dt = tz.localize(dt)
pipe = redis.pipeline()
pipe.incr('stats:cancelled')
pipe.incr('stats:cancelled:%d-%02d-%02d' % (dt.year, dt.month, dt.day))
pipe.incr('stats:cancelled:%d-%02d-%02d %02d' % (dt.year, dt.month, dt.day, dt.hour))
pipe.incr('stats:cancelled:%d-%02d-%02d %02d:%02d' % (dt.year, dt.month, dt.day, dt.hour, (dt.minute / 5) * 5))
pipe.execute()
def get_recent_gifs(count):
'''
Returns `count` recent GIF records sorted newest to oldest.
'''
return map(json.loads, redis.lrange('gifs', 0, count - 1))
def get_all_time_average(type):
avg = redis.get('stats:average:%s' % (type,))
if not avg:
return 0
return float(avg)
def get_daily_average(dt, type):
avg = redis.get('stats:average:%s:%d-%02d-%02d' % (type, dt.year, dt.month, dt.day))
if not avg:
return 0
return float(avg)
def get_hourly_average(dt, type):
avg = redis.get('stats:average:%s:%d-%02d-%02d %02d' % (type, dt.year, dt.month, dt.day, dt.hour))
if not avg:
return 0
return float(avg)
def get_five_minute_segment_average(dt, type):
avg = redis.get('stats:average:%s:%d-%02d-%02d %02d:%02d' % (type, dt.year, dt.month, dt.day, dt.hour, (dt.minute / 5) * 5))
if not avg:
return 0
return float(avg)
def get_all_time_created():
count = redis.get('stats:created')
if not count:
return 0
return int(count)
def get_daily_created(dt):
count = redis.get('stats:created:%d-%02d-%02d' % (dt.year, dt.month, dt.day))
if not count:
return 0
return int(count)
def get_hourly_created(dt):
count = redis.get('stats:created:%d-%02d-%02d %02d' % (dt.year, dt.month, dt.day, dt.hour))
if not count:
return 0
return int(count)
def get_five_minute_segment_created(dt):
count = redis.get('stats:created:%d-%02d-%02d %02d:%02d' % (dt.year, dt.month, dt.day, dt.hour, (dt.minute / 5) * 5))
if not count:
return 0
return int(count)
def get_all_time_failed():
count = redis.get('stats:failed')
if not count:
return 0
return int(count)
def get_daily_failed(dt):
count = redis.get('stats:failed:%d-%02d-%02d' % (dt.year, dt.month, dt.day))
if not count:
return 0
return int(count)
def get_hourly_failed(dt):
count = redis.get('stats:failed:%d-%02d-%02d %02d' % (dt.year, dt.month, dt.day, dt.hour))
if not count:
return 0
return int(count)
def get_five_minute_segment_failed(dt):
count = redis.get('stats:failed:%d-%02d-%02d %02d:%02d' % (dt.year, dt.month, dt.day, dt.hour, (dt.minute / 5) * 5))
if not count:
return 0
return int(count)
def get_all_time_cancelled():
count = redis.get('stats:cancelled')
if not count:
return 0
return int(count)
def get_daily_cancelled(dt):
count = redis.get('stats:cancelled:%d-%02d-%02d' % (dt.year, dt.month, dt.day))
if not count:
return 0
return int(count)
def get_hourly_cancelled(dt):
count = redis.get('stats:cancelled:%d-%02d-%02d %02d' % (dt.year, dt.month, dt.day, dt.hour))
if not count:
return 0
return int(count)
def get_five_minute_segment_cancelled(dt):
count = redis.get('stats:cancelled:%d-%02d-%02d %02d:%02d' % (dt.year, dt.month, dt.day, dt.hour, (dt.minute / 5) * 5))
if not count:
return 0
return int(count)
```
#### File: dundunduh/renderers/gifsicle.py
```python
import tempfile
import subprocess
def make_animated_gif(stream, images, delays):
# This command is not as optimized for size, but might yield better quality.
command = ['gifsicle', '-m', '-O2', '--dither', '--loopcount=forever']
#command = ['gifsicle', '-m', '-O2', '--dither', '--colors', '255', '--loopcount=forever']
temp_files = []
i = 0
for image in images:
temp = tempfile.NamedTemporaryFile(suffix=".gif")
image.save(temp)
command.append('-d%d' % delays[i])
command.append(temp.name)
i += 1
temp_files.append(temp)
subprocess.call(command, stdout=stream)
for temp in temp_files:
temp.close()
del temp_files
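# Hedged usage sketch (added): `frames` is assumed to be a list of Pillow
# Image objects; gifsicle's -d delays are in hundredths of a second.
#
#   with open('out.gif', 'wb') as fh:
#       make_animated_gif(fh, frames, delays=[10] * len(frames))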
``` |
{
"source": "jmhobbs/pircie",
"score": 3
} |
#### File: formats/pdi/pdi.py
```python
import zlib
import bz2
from struct import unpack
import types
import sdictviewer
from sdictviewer.dictutil import *
class GzipCompression:
def __str__(self):
return "gzip"
def decompress(self, string):
return zlib.decompress(string)
class Bzip2Compression:
def __str__(self):
return "bzip2"
def decompress(self, string):
return bz2.decompress(string)
class NoCompression:
def __str__(self):
return "no compression"
def decompress(self, string):
return string
def read_raw(s, fe):
return s[fe.offset:fe.offset + fe.length]
def read_str(s, fe):
raw = read_raw(s, fe)
return raw.replace('\x00', '');
def read_int(s, fe = None):
raw = read_raw(s, fe) if fe else s
return unpack('<I', raw)[0]
def read_short(raw):
return unpack('<H', raw)[0]
def read_byte(raw):
return unpack('<B', raw)[0]
class FormatElement:
def __init__(self, offset, length, elementType = None):
self.offset = offset
self.length = length
self.elementType = elementType
class Header:
f_signature = FormatElement(0x0, 4)
f_input_lang = FormatElement(0x4, 3)
f_output_lang = FormatElement(0x7, 3)
f_compression = FormatElement(0xa, 1)
f_num_of_words = FormatElement(0xb, 4)
f_length_of_short_index=FormatElement(0xf, 4)
f_title=FormatElement(0x13, 4)
f_copyright=FormatElement(0x17, 4)
f_version=FormatElement(0x1b, 4)
f_short_index=FormatElement(0x1f, 4)
f_full_index=FormatElement(0x23, 4)
f_articles=FormatElement(0x27, 4)
def parse(self, str):
self.signature = read_str(str, self.f_signature)
if self.signature != 'pdi!':
raise DictFormatError, "Not a valid pdi dictionary"
self.word_lang = read_str(str, self.f_input_lang)
self.article_lang = read_str(str, self.f_output_lang)
self.short_index_length = read_int(str, self.f_length_of_short_index)
comp_and_index_levels_byte = read_byte(read_raw(str, self.f_compression))
self.compressionType = comp_and_index_levels_byte & int("00001111", 2)
self.short_index_depth = comp_and_index_levels_byte >> 4
self.num_of_words = read_int(str, self.f_num_of_words)
self.title_offset = read_int(str, self.f_title)
self.copyright_offset = read_int(str, self.f_copyright)
self.version_offset = read_int(str, self.f_version)
self.articles_offset = read_int(str, self.f_articles)
self.short_index_offset = read_int(str, self.f_short_index)
self.full_index_offset = read_int(str, self.f_full_index)
compressions = {0:NoCompression(), 1:GzipCompression(), 2:Bzip2Compression()}
class Word:
def __init__(self, dictionary, word):
self.dictionary = dictionary
self.encoding = dictionary.encoding
self.collator = dictionary.collator
self.article_ptr = None
self.unicode = None
self.word = None
self.sortkey = None
self.word_lang = dictionary.header.word_lang
self.article_lang = dictionary.header.article_lang
if type(word) is types.UnicodeType:
self.unicode = word
self.word = self.unicode.encode(self.encoding)
else:
self.word = word
try:
self.unicode = self.word.decode(self.encoding)
except UnicodeDecodeError:
self.unicode = "error".decode(self.encoding)
print "Unable to decode:", self.encoding, word
if self.collator == None:
self.sortkey = str(self)
else:
self.sortkey = self.collator.sort_key(self.unicode)
def __str__(self):
return self.word
def __eq__(self, other):
return self.sortkey == other.sortkey
def __cmp__(self, other):
return cmp(self.sortkey, other.sortkey)
def __unicode__(self):
return self.unicode
def startswith(self, s):
ssk = s.sortkey
return self.sortkey[0:len(ssk)] == ssk
def get_article(self):
return self.dictionary.read_article(self.article_ptr)
class Dictionary:
def __init__(self, file_name):
self.file_name = file_name
self.file = open(file_name, "rb");
self.header = Header()
self.header.parse(self.file.read(43))
self.compression = compressions[self.header.compressionType]
self.title = self.read_unit(self.header.title_offset)
self.version = self.read_unit(self.header.version_offset)
self.copyright = self.read_unit(self.header.copyright_offset)
self.encoding = "utf-8"
self.collator = sdictviewer.ucollator
self.word_list = None
def __eq__(self, other):
return self.key() == other.key()
def __str__(self):
return self.file_name
def __hash__(self):
return self.key().__hash__()
def key(self):
return (self.title, self.version, self.file_name)
def read_unit(self, pos):
f = self.file
f.seek(pos);
record_length= read_int(f.read(4))
s = f.read(record_length)
s = self.compression.decompress(s)
return s
def find_index_entry(self, word):
low = self.header.full_index_offset
high = self.header.articles_offset - 1
probe = -1
while True:
prevprobe = probe
probe = low + int((high-low)/2)
probe = self.findword(probe)
if probe == prevprobe:
return low
next_offset, probeword = self.read_full_index_item(probe)
if probeword == word:
return probe
if probeword > word:
high = probe
else:
low = probe
def findword(self, pos):
self.file.seek(pos)
b = ""
start = -1
while (start == -1) and (pos + len(b) < self.header.articles_offset):
b = ''.join([b, self.file.read(128)])
start = b.find("\xFE\xFE\xFE\xFE")
if start == -1:
raise Exception("could not find start position in long index: " + str(pos))
return pos + start
def get_word_list_iter(self, start_string):
start_word = Word(self, start_string)
next_ptr = self.find_index_entry(start_word)
found = False
while True:
if next_ptr < 0:
raise StopIteration
next_offset, word = self.read_full_index_item(next_ptr)
if word.startswith(start_word):
found = True
yield WordLookup(str(word), word.dictionary, word.article_ptr)
else:
if word > start_word:
raise StopIteration
next_ptr += next_offset
def read_full_index_item(self, pointer):
f = self.file
f.seek(pointer)
s = f.read(16)
next_word_offset = unpack('<I', s[4:8])[0]
article_ptr = unpack('<I', s[12:16])[0]
word = f.read(next_word_offset - 16)
word = Word(self, word)
word.article_ptr = article_ptr
return next_word_offset, word
def read_article(self, pointer):
return self.read_unit(self.header.articles_offset + pointer)
def load(self):
return
def index(self, items):
return
def close(self, save_index = True):
self.file.close()
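# Hedged usage sketch (added; 'example.pdi' and the query word are placeholders):
#
#   d = Dictionary('example.pdi')
#   print d.title, d.version, d.copyright
#   for lookup in d.get_word_list_iter(u'house'):
#       print lookup
#   d.close()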
```
#### File: pircie/pircie/irc.py
```python
from twisted.words.protocols import irc
from twisted.internet import reactor, protocol, task
class IRCBot ( irc.IRCClient ):
plugins = None
versionName = "pircie-bot"
versionNum = "0.1"
sourceURL = "http://github.com/jmhobbs/pircie"
username = "%s-%s" % ( versionName, versionNum )
nickname = None
channel = None
lineRate = 2
def try_say( self, message, silent=False ):
"""
Attempts to send the given message to the channel.
"""
try:
self.say( self.channel, message )
if not silent:
self.privmsg( self.nickname, self.channel, message )
return True
except:
return False
def connectionMade ( self ):
irc.IRCClient.connectionMade( self )
for plugin in self.plugins.get_plugins_by_hook( 'MADE_CONNECTION' ):
if False == plugin.MADE_CONNECTION( self ):
break
def connectionLost( self, reason ):
irc.IRCClient.connectionLost( self, reason )
for plugin in self.plugins.get_plugins_by_hook( 'LOST_CONNECTION' ):
if False == plugin.LOST_CONNECTION( self, reason ):
break
def signedOn( self ):
for plugin in self.plugins.get_plugins_by_hook( 'SIGNED_ON' ):
if False == plugin.SIGNED_ON( self):
break
self.join( self.channel )
def joined( self, channel ):
for plugin in self.plugins.get_plugins_by_hook( 'JOINED' ):
if False == plugin.JOINED( self, channel ):
break
def left( self, channel ):
for plugin in self.plugins.get_plugins_by_hook( 'LEFT' ):
if False == plugin.LEFT( self, channel ):
break
def privmsg ( self, user, channel, msg ):
if channel == self.nickname:
for plugin in self.plugins.get_plugins_by_hook( 'WHISPER' ):
if False == plugin.WHISPER( self, user, msg ):
break
else:
first_word = msg.split( ' ' )[0]
if first_word == self.nickname or first_word == self.nickname + ':' or first_word == '@' + self.nickname or first_word == '@' + self.nickname + ':':
for plugin in self.plugins.get_plugins_by_hook( 'ATME' ):
atme_msg = " ".join( msg.split( ' ' )[1:] )
if False == plugin.ATME( self, user, channel, atme_msg ):
break
for plugin in self.plugins.get_plugins_by_hook( 'MESSAGE' ):
if False == plugin.MESSAGE( self, user, channel, msg ):
break
def action ( self, user, channel, msg ):
for plugin in self.plugins.get_plugins_by_hook( 'ACTION' ):
if False == plugin.ACTION( self, user, channel, msg ):
break
def irc_NICK ( self, prefix, params ):
old_nick = prefix.split('!')[0]
new_nick = params[0]
for plugin in self.plugins.get_plugins_by_hook( 'NICK_CHANGE' ):
if False == plugin.NICK_CHANGE( self, old_nick, new_nick ):
break
class IRCBotFactory( protocol.ReconnectingClientFactory ):
protocol = IRCBot
def __init__( self, plugins, channel, nickname, **kwargs ):
IRCBot.plugins = plugins
IRCBot.channel = channel
IRCBot.nickname = nickname
# Now load our args
if 'versionName' in kwargs:
IRCBot.versionName = kwargs['versionName']
if 'versionNum' in kwargs:
IRCBot.versionNum = kwargs['versionNum']
if 'sourceURL' in kwargs:
IRCBot.sourceURL = kwargs['sourceURL']
if 'username' in kwargs:
IRCBot.username = kwargs['username']
def clientConnectionFailed( self, connector, reason ):
reactor.stop()
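# Hedged usage sketch (added): wiring the factory into a Twisted reactor,
# assuming `plugins` is the project's plugin-manager instance and the
# server/channel/nick values are placeholders.
#
#   factory = IRCBotFactory(plugins, '#example', 'pircie-bot')
#   reactor.connectTCP('irc.example.net', 6667, factory)
#   reactor.run()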
```
#### File: pircie/plugins/webdefine.py
```python
from xml.dom.minidom import parseString
import socket
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError
class Plugin:
hooks = [ 'ATME' ]
# Alternate sources you could try:
# http://ninjawords.com/definitions/get/[WORD] (add POST variable 123 with no value)
# http://www.google.com/dictionary/json?callback=dict_api.callbacks.id100&q=[WORD]&sl=en&tl=en&restrict=pr%2Cde&client=te
url = "http://services.aonaware.com/DictService/DictService.asmx/DefineInDict"
def __init__ ( self ):
# Set the timeout to something reasonable
socket.setdefaulttimeout( 5 )
def ATME ( self, bot, user, channel, message ):
print message
if 'WEBDEFINE' == message.split( ' ' )[0]:
define = " ".join( message.split( ' ' )[1:] )
data = urlencode( { 'dictId': 'wn', 'word': define } )
request = Request( "%s?%s" % ( self.url, data ) )
try:
response = urlopen( request )
except HTTPError, e:
bot.try_say( 'The definition server returned an error code: %d' % e.code )
except URLError, e:
print 'We failed to reach a server.'
bot.try_say( 'Failed to reach the definition server: %s' % e.reason )
else:
dom = parseString( response.read() )
try:
# TODO Cache this response locally?
bot.try_say( "%s: %s" % ( define, dom.childNodes[0].getElementsByTagName( "Definitions" )[0].getElementsByTagName( "Definition" )[0].getElementsByTagName( "WordDefinition" )[0].childNodes[0].data ) )
return False
except:
bot.try_say( "No definition found for '%s'." % define )
``` |
{
"source": "jmhodges/atheris",
"score": 2
} |
#### File: atheris/src/regex_match_generation_test.py
```python
import re
import sre_parse
from google3.testing.pybase import parameterized, googletest
from atheris import gen_match
TESTS = [
(r"abc"),
(r"abc|def"),
(r"(abc|\d+)"),
(r"(?:abc){3,}"),
(r"(?:abc){,3}"),
(r"(?=abc)"),
(r"(?<!abc)"),
(r"[^abc]abc"),
(r"[abc]abc"),
]
class RegexTests(parameterized.TestCase):
@parameterized.parameters(TESTS)
def testRegExMatchGeneration(self, test_input):
match = gen_match(sre_parse.parse(test_input))
if re.match(test_input, match) is None:
raise AssertionError(f"Could not generate RegEx Match for {test_input}")
if __name__ == "__main__":
googletest.main()
``` |
{
"source": "jmhodges/certificate-transparency",
"score": 3
} |
#### File: ct/cert_analysis/ocsp_pointers_test.py
```python
import mock
import unittest
from ct.cert_analysis import base_check_test
from ct.cert_analysis import ocsp_pointers
from ct.crypto import cert
CRYPTO_TEST_DATA_DIR = "ct/crypto/testdata/"
CERT_WITH_OCSP = cert.Certificate.from_pem_file(CRYPTO_TEST_DATA_DIR +
"aia.pem")
CERT_WITHOUT_OCSP = cert.Certificate.from_pem_file(CRYPTO_TEST_DATA_DIR +
"promise_com.pem")
class OcspPointersTest(base_check_test.BaseCheckTest):
def test_ocsp_existence_exist(self):
check = ocsp_pointers.CheckOcspExistence()
result = check.check(CERT_WITH_OCSP)
self.assertIsNone(result)
def test_ocsp_existence_doesnt_exist(self):
check = ocsp_pointers.CheckOcspExistence()
result = check.check(CERT_WITHOUT_OCSP)
self.assertObservationIn(ocsp_pointers.LackOfOcsp(), result)
def test_ocsp_extension_corrupt(self):
certificate = mock.MagicMock()
certificate.ocsp_responders = mock.Mock(
side_effect=cert.CertificateError("Corrupt or unrecognized..."))
check = ocsp_pointers.CheckCorruptOrMultipleAiaExtension()
result = check.check(certificate)
self.assertObservationIn(ocsp_pointers.CorruptAiaExtension(), result)
def test_ocsp_extension_multiple(self):
certificate = mock.MagicMock()
certificate.ocsp_responders = mock.Mock(
side_effect=cert.CertificateError("Multiple extension values"))
check = ocsp_pointers.CheckCorruptOrMultipleAiaExtension()
result = check.check(certificate)
self.assertObservationIn(ocsp_pointers.MultipleOcspExtensions(), result)
if __name__ == '__main__':
unittest.main()
```
#### File: ct/cert_analysis/tld_list_test.py
```python
import unittest
from ct.cert_analysis import tld_list
TLD_DIR = "ct/cert_analysis/test_data/"
TLD_FILE = "test_tld_list"
class TLDListTest(unittest.TestCase):
def default_list(self):
return tld_list.TLDList(tld_dir=TLD_DIR,
tld_file_name=TLD_FILE)
def test_tld_list_example_matches(self):
url = "example.com"
tlds = self.default_list()
self.assertEqual("com", tlds.match(url))
def test_tld_list_doesnt_match(self):
url = "kokojambo.i.do.przodu"
tlds = self.default_list()
self.assertIsNone(tlds.match(url))
def test_tld_list_match_unicode_address(self):
end = unicode("বাংলা", 'utf-8')
beg = "example"
url = '.'.join((beg, end))
tlds = self.default_list()
self.assertEqual(end, tlds.match(url))
def test_tld_list_match_idna(self):
end = unicode("বাংলা", 'utf-8')
beg = "example"
url = '.'.join((beg, end)).encode('idna')
tlds = self.default_list()
self.assertEqual(end, tlds.match_idna(url))
def test_wildcard_match(self):
url = "hammersmashedface.kawasaki.jp"
tlds = self.default_list()
self.assertEqual(url, tlds.match(url))
def test_exception_match(self):
url = "city.kobe.jp"
tlds = self.default_list()
self.assertEqual("kobe.jp", tlds.match(url))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jmholla/markflow",
"score": 3
} |
#### File: markflow/detectors/list.py
```python
from typing import List, Tuple
from ._lines import (
is_blank_line_line,
is_list_start_line,
is_table_start_line,
is_thematic_break_line,
)
def list_started(line: str, index: int, lines: List[str]) -> bool:
"""DEPRECATED"""
return is_list_start_line(line)
def list_ended(line: str, index: int, lines: List[str]) -> bool:
"""DEPRECATED"""
return (
is_blank_line_line(line)
or is_table_start_line(line)
or is_thematic_break_line(line)
)
def split_list(lines: List[str], line_offset: int = 0) -> Tuple[List[str], List[str]]:
list_ = []
remaining_lines = lines
index = 0
if list_started(lines[index], index, lines):
list_.append(lines[index])
for index, line in enumerate(lines[1:], start=index + 1):
if list_ended(line, index, lines):
break
list_.append(line)
else:
index += 1
remaining_lines = lines[index:]
return list_, remaining_lines
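# Hedged usage sketch (added; assumes the _lines helpers behave as their names
# suggest). split_list peels a leading bullet list off a document:
#
#   lines = ["* item one", "* item two", "", "A trailing paragraph."]
#   list_lines, rest = split_list(lines)
#   # list_lines -> ["* item one", "* item two"]
#   # rest       -> ["", "A trailing paragraph."]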
```
#### File: markflow/formatters/atx_heading.py
```python
import logging
from .._utils import truncate_str
from ..typing import Number
from .base import MarkdownSection
__all__ = ["MarkdownATXHeading"]
logger = logging.getLogger(__name__)
REPR_CONTENT_LEN = 20
class MarkdownATXHeading(MarkdownSection):
@property
def content(self) -> str:
if not self.lines:
raise RuntimeError(
f"Attempted access of uninitialized {self.__class__.__name__}."
)
return self.lines[0].strip().lstrip("#").strip()
@property
def depth(self) -> int:
if not self.lines:
raise RuntimeError(
f"Attempted access of uninitialized {self.__class__.__name__}."
)
return len(self.lines[0].strip()) - len(self.lines[0].strip().lstrip("#"))
def append(self, line: str) -> None:
if self.lines:
raise RuntimeError(
"Attempted to add another line to an ATX Header. They can only be one "
"line."
)
self.lines.append(line)
def reformatted(self, width: Number = 88) -> str:
# TODO: This prints out twice. We probably need a first pass step that calls out
# errors we will be fixing to suppress extra statements from reprocessing the
# document.
if not self.lines[0].strip().lstrip("#").startswith(" "):
logger.warning(
"Line %d is an ATX Header without a space after #'s. This has been "
"corrected.",
self.line_index + 1,
)
return "#" * self.depth + " " + self.content
def __repr__(self) -> str:
return (
f"<"
f"{self.__class__.__name__}: "
f"depth={repr(self.depth)}; "
f"content={repr(truncate_str(self.content, REPR_CONTENT_LEN))}"
f">"
)
```
#### File: markflow/formatters/blank_line.py
```python
from ..typing import Number
from .base import MarkdownSection
__all__ = ["MarkdownBlankLine"]
class MarkdownBlankLine(MarkdownSection):
def append(self, line: str) -> None:
if line.strip():
raise RuntimeError(
f"A line with non-whitespace characters has been added to a "
f"`{self.__class__.__name__}`. Please open a bug report or email "
f"<EMAIL>."
)
if self.lines:
raise RuntimeError(
f"`{self.__class__.__name__}`s can only contain one line. Please open "
f"a bug report or email <EMAIL>."
)
self.lines.append(line)
def reformatted(self, width: Number = 88) -> str:
# The new line will be added on join
return ""
def __repr__(self) -> str:
return f"<{self.__class__.__name__}>"
```
#### File: markflow/formatters/indented_code_block.py
```python
import logging
from .._utils import truncate_str
from ..typing import Number
from .base import MarkdownSection
__all__ = ["MarkdownIndentedCodeBlock"]
logger = logging.getLogger(__name__)
REPR_CONTENT_LEN = 20
class MarkdownIndentedCodeBlock(MarkdownSection):
@property
def first_line(self) -> str:
return self.lines[0].strip()
def append(self, line: str) -> None:
self.lines.append(line)
def reformatted(self, width: Number = 88) -> str:
return "\n".join([line.rstrip() for line in self.lines])
def __repr__(self) -> str:
return (
f"<"
f"{self.__class__.__name__}: "
f"first_line={repr(truncate_str(self.first_line, REPR_CONTENT_LEN))}"
f">"
)
```
#### File: markflow/tests/test_horizontal_line.py
```python
import math
from markflow.formatters.thematic_break import MarkdownThematicBreak
from .util import create_section, render
class TestThematicBreak:
def test_too_short(self) -> None:
width = 50
input_ = "---"
expected = "-" * width
h_line = create_section(MarkdownThematicBreak, input_)
assert h_line.reformatted(width) == expected
assert render(expected) == render(input_)
def test_too_long(self) -> None:
width = 50
input_ = "-" * 100
expected = "-" * width
h_line = create_section(MarkdownThematicBreak, input_)
assert h_line.reformatted(width) == expected
assert render(expected) == render(input_)
def test_infinity(self) -> None:
width = math.inf
input_ = "----------"
expected = "---"
h_line = create_section(MarkdownThematicBreak, input_)
assert h_line.reformatted(width) == expected
assert render(expected) == render(input_)
```
#### File: markflow/tests/test_indented_code_block.py
```python
from markflow.formatters.indented_code_block import MarkdownIndentedCodeBlock
from .util import create_section, render
class TestIndentedCodeBlock:
def test_simple(self) -> None:
input_ = " import goods \n" "\n" " tariffs = good.audit() \n"
expected = " import goods\n" "\n" " tariffs = good.audit()"
code_block = create_section(MarkdownIndentedCodeBlock, input_)
assert code_block.reformatted() == expected
code_block = create_section(MarkdownIndentedCodeBlock, expected)
assert code_block.reformatted() == expected
assert render(expected) == render(input_)
``` |
{
"source": "jmholt20/timbretransfer",
"score": 2
} |
#### File: timbretransfer/code/train_multitarget.py
```python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import ast
import os
import time
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from config import CHECKPOINT_DIR, IMG_DIM, OUTPUT_PATH, TEST_AUDIOS_PATH
from data import (DataGeneratorMultiTarget, amplitude_to_db, db_to_amplitude,
forward_transform, init_directory, inverse_transform,
join_magnitude_slices, load_audio, slice_magnitude,
write_audio)
from losses import discriminator_loss, generator_loss, l1_loss
from model import Discriminator, Generator
def generate_audio(prediction, phase, output_name):
mag_db = join_magnitude_slices(prediction, phase.shape)
mag = db_to_amplitude(mag_db)
audio = inverse_transform(mag, phase)
write_audio(output_name, audio)
def generate_images(prediction, test_input, target, output_name):
display_list = [prediction[0,:,:,0], test_input[0,:,:,0], target[0,:,:,0]]
title = ['pred']
for i in range(len(title)):
temp_img = np.flip((display_list[i] + 1) / 2, axis=0) # [-1,1] >> [0,1]
plt.imsave(output_name+'_'+title[i]+'.png', temp_img)
def write_csv(df, output_name):
df.to_csv(output_name, header='column_names')
def plot_loss_findlr(losses, lrs, output_name, n_skip_beginning=10, n_skip_end=5):
"""
Plots the loss.
Parameters:
n_skip_beginning - number of batches to skip on the left.
n_skip_end - number of batches to skip on the right.
"""
plt.figure()
plt.ylabel("loss")
plt.xlabel("learning rate (log scale)")
plt.plot(lrs[n_skip_beginning:-n_skip_end], losses[n_skip_beginning:-n_skip_end])
plt.xscale('log')
plt.savefig(output_name)
def find_lr(data, batch_size=1, start_lr=1e-9, end_lr=1):
generator = Generator(input_shape=[None,None,2])
discriminator = Discriminator(input_shape=[None,None,1])
generator_optimizer = tf.keras.optimizers.Adam(lr=start_lr)
discriminator_optimizer = tf.keras.optimizers.Adam(lr=start_lr)
model_name = data['training'].origin+'_2_any'
checkpoint_prefix = os.path.join(CHECKPOINT_DIR, model_name)
if(not os.path.isdir(checkpoint_prefix)):
os.makedirs(checkpoint_prefix)
epoch_size = data['training'].__len__()
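# Added note: lr_mult is the per-batch multiplicative step, so the learning
# rate ramps exponentially from start_lr to end_lr over one pass through the data.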
lr_mult = (end_lr / start_lr) ** (1 / epoch_size)
lrs = []
losses = {
'gen_mae': [],
'gen_loss': [],
'disc_loss': []
}
best_losses = {
'gen_mae': 1e9,
'gen_loss': 1e9,
'disc_loss': 1e9
}
print()
print("Finding the optimal LR with the following parameters: ")
print("\tCheckpoints: \t", checkpoint_prefix)
print("\tEpochs: \t", 1)
print("\tBatchSize: \t", batch_size)
print("\tnBatches: \t", epoch_size)
print()
print('Epoch {}/{}'.format(1, 1))
progbar = tf.keras.utils.Progbar(epoch_size)
for i in range(epoch_size):
# Get the data from the DataGenerator
input_image, target = data['training'].__getitem__(i)
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
# Generate a fake image
gen_output = generator(input_image, training=True)
# Train the discriminator
disc_real_output = discriminator([input_image[:,:,:,0:1], target], training=True)
disc_generated_output = discriminator([input_image[:,:,:,0:1], gen_output], training=True)
# Compute the losses
gen_mae = l1_loss(target, gen_output)
gen_loss = generator_loss(disc_generated_output, gen_mae)
disc_loss = discriminator_loss(disc_real_output, disc_generated_output)
# Compute the gradients
generator_gradients = gen_tape.gradient(gen_loss, generator.trainable_variables)
discriminator_gradients = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
# Apply the gradients
generator_optimizer.apply_gradients(zip(generator_gradients, generator.trainable_variables))
discriminator_optimizer.apply_gradients(zip(discriminator_gradients, discriminator.trainable_variables))
# Convert losses to numpy
gen_mae = gen_mae.numpy()
gen_loss = gen_loss.numpy()
disc_loss = disc_loss.numpy()
# Update the progress bar
progbar.add(1, values=[
("gen_mae", gen_mae),
("gen_loss", gen_loss),
("disc_loss", disc_loss)
])
# On batch end
lr = tf.keras.backend.get_value(generator_optimizer.lr)
lrs.append(lr)
# Update the lr
lr *= lr_mult
tf.keras.backend.set_value(generator_optimizer.lr, lr)
tf.keras.backend.set_value(discriminator_optimizer.lr, lr)
# Update the losses
losses['gen_mae'].append(gen_mae)
losses['gen_loss'].append(gen_loss)
losses['disc_loss'].append(disc_loss)
# Update the best losses
if(best_losses['gen_mae'] > gen_mae):
best_losses['gen_mae'] = gen_mae
if(best_losses['gen_loss'] > gen_loss):
best_losses['gen_loss'] = gen_loss
if(best_losses['disc_loss'] > disc_loss):
best_losses['disc_loss'] = disc_loss
if(gen_mae >= 100*best_losses['gen_mae'] or gen_loss >= 100*best_losses['gen_loss'] or disc_loss >= 100*best_losses['disc_loss']):
break
plot_loss_findlr(losses['gen_mae'], lrs, os.path.join(checkpoint_prefix, 'LRFinder_gen_mae.tiff'))
plot_loss_findlr(losses['gen_loss'], lrs, os.path.join(checkpoint_prefix, 'LRFinder_gen_loss.tiff'))
plot_loss_findlr(losses['disc_loss'], lrs, os.path.join(checkpoint_prefix, 'LRFinder_disc_loss.tiff'))
print('Best losses:')
print('gen_mae =', best_losses['gen_mae'])
print('gen_loss =', best_losses['gen_loss'])
print('disc_loss =', best_losses['disc_loss'])
def train(data, epochs, batch_size=1, gen_lr=5e-6, disc_lr=5e-7, epoch_offset=0):
generator = Generator(input_shape=[None,None,2])
discriminator = Discriminator(input_shape=[None,None,1])
generator_optimizer = tf.keras.optimizers.Adam(gen_lr)
discriminator_optimizer = tf.keras.optimizers.Adam(disc_lr)
model_name = data['training'].origin+'_2_any'
checkpoint_prefix = os.path.join(CHECKPOINT_DIR, model_name)
if(not os.path.isdir(checkpoint_prefix)):
os.makedirs(checkpoint_prefix)
else:
if(os.path.isfile(os.path.join(checkpoint_prefix, 'generator.h5'))):
generator.load_weights(os.path.join(checkpoint_prefix, 'generator.h5'), by_name=True)
print('Generator weights restored from ' + checkpoint_prefix)
if(os.path.isfile(os.path.join(checkpoint_prefix, 'discriminator.h5'))):
discriminator.load_weights(os.path.join(checkpoint_prefix, 'discriminator.h5'), by_name=True)
print('Discriminator weights restored from ' + checkpoint_prefix)
# Get the number of batches in the training set
epoch_size = data['training'].__len__()
print()
print("Started training with the following parameters: ")
print("\tCheckpoints: \t", checkpoint_prefix)
print("\tEpochs: \t", epochs)
print("\tgen_lr: \t", gen_lr)
print("\tdisc_lr: \t", disc_lr)
print("\tBatchSize: \t", batch_size)
print("\tnBatches: \t", epoch_size)
print()
# Precompute the test input and target for validation
audio_input = load_audio(os.path.join(TEST_AUDIOS_PATH, data['training'].origin+'.wav'))
mag_input, phase = forward_transform(audio_input)
mag_input = amplitude_to_db(mag_input)
test_input = slice_magnitude(mag_input, mag_input.shape[0])
test_input = (test_input * 2) - 1
test_inputs = []
test_targets = []
for t in data['training'].target:
audio_target = load_audio(os.path.join(TEST_AUDIOS_PATH, t+'.wav'))
mag_target, _ = forward_transform(audio_target)
mag_target = amplitude_to_db(mag_target)
test_target = slice_magnitude(mag_target, mag_target.shape[0])
test_target = (test_target * 2) - 1
test_target_perm = test_target[np.random.permutation(test_target.shape[0]),:,:,:]
test_inputs.append(np.concatenate([test_input, test_target_perm], axis=3))
test_targets.append(test_target)
gen_mae_list, gen_mae_val_list = [], []
gen_loss_list, gen_loss_val_list = [], []
disc_loss_list, disc_loss_val_list = [], []
for epoch in range(epochs):
gen_mae_total, gen_mae_val_total = 0, 0
gen_loss_total, gen_loss_val_total = 0, 0
disc_loss_total, disc_loss_val_total = 0, 0
print('Epoch {}/{}'.format((epoch+1)+epoch_offset, epochs+epoch_offset))
progbar = tf.keras.utils.Progbar(epoch_size)
for i in range(epoch_size):
# Get the data from the DataGenerator
input_image, target = data['training'].__getitem__(i)
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
# Generate a fake image
gen_output = generator(input_image, training=True)
# Train the discriminator
disc_real_output = discriminator([input_image[:,:,:,0:1], target], training=True)
disc_generated_output = discriminator([input_image[:,:,:,0:1], gen_output], training=True)
# Compute the losses
gen_mae = l1_loss(target, gen_output)
gen_loss = generator_loss(disc_generated_output, gen_mae)
disc_loss = discriminator_loss(disc_real_output, disc_generated_output)
# Compute the gradients
generator_gradients = gen_tape.gradient(gen_loss,generator.trainable_variables)
discriminator_gradients = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
# Apply the gradients
generator_optimizer.apply_gradients(zip(generator_gradients, generator.trainable_variables))
discriminator_optimizer.apply_gradients(zip(discriminator_gradients, discriminator.trainable_variables))
# Update the progress bar
gen_mae = gen_mae.numpy()
gen_loss = gen_loss.numpy()
disc_loss = disc_loss.numpy()
gen_mae_total += gen_mae
gen_loss_total += gen_loss
disc_loss_total += disc_loss
progbar.add(1, values=[
("gen_mae", gen_mae),
("gen_loss", gen_loss),
("disc_loss", disc_loss)
])
gen_mae_list.append(gen_mae_total/epoch_size)
gen_mae_val_list.append(gen_mae_val_total/epoch_size)
gen_loss_list.append(gen_loss_total/epoch_size)
gen_loss_val_list.append(gen_loss_val_total/epoch_size)
disc_loss_list.append(disc_loss_total/epoch_size)
disc_loss_val_list.append(disc_loss_val_total/epoch_size)
history = pd.DataFrame({
'gen_mae': gen_mae_list,
'gen_mae_val': gen_mae_val_list,
'gen_loss': gen_loss_list,
'gen_loss_val': gen_loss_val_list,
'disc_loss': disc_loss_list,
'disc_loss_val': disc_loss_val_list
})
write_csv(history, os.path.join(checkpoint_prefix, 'history.csv'))
epoch_output = os.path.join(OUTPUT_PATH, model_name, str((epoch+1)+epoch_offset).zfill(3))
init_directory(epoch_output)
# Generate audios and save spectrograms for the entire audios
for j in range(len(data['training'].target)):
prediction = generator(test_inputs[j], training=False)
prediction = (prediction + 1) / 2
generate_images(prediction, (test_inputs[j] + 1) / 2, (test_targets[j] + 1) / 2, os.path.join(epoch_output, 'spectrogram_'+data['training'].target[j]))
generate_audio(prediction, phase, os.path.join(epoch_output, 'audio_'+data['training'].target[j]+'.wav'))
print('Epoch outputs saved in ' + epoch_output)
# Save the weights
generator.save_weights(os.path.join(checkpoint_prefix, 'generator.h5'))
discriminator.save_weights(os.path.join(checkpoint_prefix, 'discriminator.h5'))
print('Weights saved in ' + checkpoint_prefix)
# Callback at the end of the epoch for the DataGenerator
data['training'].on_epoch_end()
# data['validation'].on_epoch_end()
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('--dataset_path', required=True)
ap.add_argument('--origin', required=True)
ap.add_argument('--target', required=True)
ap.add_argument('--gpu', required=False, default='0')
ap.add_argument('--epochs', required=False, default=100)
ap.add_argument('--epoch_offset', required=False, default=0)
ap.add_argument('--batch_size', required=False, default=1)
ap.add_argument('--gen_lr', required=False, default=5e-6)
ap.add_argument('--disc_lr', required=False, default=5e-7)
ap.add_argument('--validation_split', required=False, default=0.9)
ap.add_argument('--findlr', required=False, default=False)
args = ap.parse_args()
# Select which GPU to use and enable mixed precision
print('Using GPU: '+ args.gpu)
os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
data = {
'training': DataGeneratorMultiTarget(
origin=args.origin,
target=ast.literal_eval(args.target),
base_path=args.dataset_path,
batch_size=int(args.batch_size),
img_dim=IMG_DIM,
validation_split=float(args.validation_split),
is_training=True,
scale_factor=1
),
'validation': DataGeneratorMultiTarget(
origin=args.origin,
target=ast.literal_eval(args.target),
base_path=args.dataset_path,
batch_size=int(args.batch_size),
img_dim=IMG_DIM,
validation_split=float(args.validation_split),
is_training=False,
scale_factor=1,
shuffle=False
)
}
if(args.findlr):
find_lr(data, int(args.batch_size))
else:
train(
data,
int(args.epochs),
int(args.batch_size),
float(args.gen_lr),
float(args.disc_lr),
int(args.epoch_offset)
)
``` |
{
"source": "jmhong-simulation/cpp_test_projects",
"score": 3
} |
#### File: BoostPythonTests/PythonTestPart/PythonTestPart.py
```python
import tensorflow as tf
import numpy as np
sess = tf.InteractiveSession()
input = tf.placeholder(tf.float32, [None, 1])
target = tf.placeholder(tf.float32, [None, 1])
pp = (input, 1)
print(type(pp))
ka = {'inputs' : input, 'units' : 1, 'activation' : tf.nn.tanh, 'name' : 'temp_dense'}
print(type(ka))
def run_with_pos_key(positional, keywords, function):
return function(*positional, **keywords)
#temp = run_with_pos_key(pp, ka, tf.layers.dense)
temp = tf.layers.dense(**ka)
print(temp)
# 1. default way
# temp = tf.layers.dense(inputs = input, units = 1, activation = tf.nn.tanh)
# 2. using positional/keyword argumetns
# pp = (input, 1)
# print(type(pp))
# ka = {'activation' : tf.nn.tanh}
# print(type(ka))
# temp = tf.layers.dense(*pp, **ka)
# 3. using executer function
# def run_with_pos_key(positional, keywords, function):
# return function(*positional, **keywords)
# temp = run_with_pos_key(pp, ka, tf.layers.dense)
loss = tf.losses.mean_squared_error(*(target, temp))
train = tf.train.AdamOptimizer(1e-1).minimize(loss)
x_input = np.ones((1, 1), 'f')
y_target = np.ones((1, 1), 'f')
#print([tensor.name for tensor in tf.get_default_graph().as_graph_def().node])
init_op = tf.global_variables_initializer()
sess.run(init_op)
for i in range(0, 20):
# temp = tf.get_default_graph().get_tensor_by_name('temp_dense/Tanh:0') # find by node name
y_out, lo, _ = sess.run([tf.get_default_graph().get_tensor_by_name('temp_dense/Tanh:0'), loss, train], {input: x_input, target : y_target})
print(y_out, " ", lo)
# data type check version
#import tensorflow as tf
#import numpy as np
#sess = tf.InteractiveSession()
#data_type = tf.float32
#dense_shape_size = [1, 1]
#none_type_object = None
#dense_shape = [none_type_object]
#dense_shape.extend(dense_shape_size)
#print(dense_shape)
#input = tf.placeholder(data_type, dense_shape, "input")
#temp = tf.layers.dense(input, 1)
#print(type(data_type))
#print(type(None))
#print(type(dense_shape))
#x_input = np.ones((1, 1, 1), 'f')
#init_op = tf.global_variables_initializer()
#sess.run(init_op)
#feed_dict = {input: x_input}
#print(type(feed_dict))
#y_out = sess.run([temp], feed_dict)
#print(y_out)
``` |
{
"source": "JMHOO/vessel",
"score": 3
} |
#### File: vessel/test/reader_unittest.py
```python
import unittest
from vessel.preprocess import FileFeeder, DICOMFileIterator
from vessel.utils import GeneratorQueue
class TestGenerator(unittest.TestCase):
def test_generator_queue(self):
batch_size = 12
epochs = 2
feeder = FileFeeder('data')
n_of_samples = len(feeder)
steps_of_epoch = n_of_samples / batch_size
generator = DICOMFileIterator(x=feeder.files(), batch_size=8)
queue = GeneratorQueue(generator=generator)
queue.start()
output = queue.fetch()
epoch = 0
while epoch < epochs:
steps = 0
print("Epoch-{}".format(epoch))
while steps < steps_of_epoch:
item = next(output)
x, y = item
current_batch_size = x.shape[0]
print("Batch-{}, X.shape: {}, Y.shape: {}".format(steps, x.shape, y.shape))
steps += 1
epoch += 1
queue.stop()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jmhubbard/cocktail_api",
"score": 2
} |
#### File: cocktail_api/ingredients/views.py
```python
from django.shortcuts import render
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .models import Ingredient
from .serializers import IngredientSerializer
@api_view(['GET'])
def ingredientList(request):
ingredients = Ingredient.objects.all().order_by('name')
serializer = IngredientSerializer(ingredients, many=True)
return Response(serializer.data)
```
#### File: cocktail_api/recipes/models.py
```python
from django.db import models
class Recipe(models.Model):
drink = models.ForeignKey(
'drinks.Drink',
on_delete=models.CASCADE,
)
ingredient = models.ForeignKey(
'ingredients.Ingredient',
on_delete=models.CASCADE,
)
amount = models.CharField(max_length=200, null=True, blank=True)
class Meta:
unique_together = (
("drink", "ingredient", "amount")
)
def __str__(self):
return f'{self.amount} {self.ingredient}'
``` |
{
"source": "jmhubbard/Good_Life_Meal_Prep_Subscribers_Page",
"score": 2
} |
#### File: Good_Life_Meal_Prep_Subscribers_Page/main/utils.py
```python
import os
from django.contrib.sites.models import Site
from django.core.mail import EmailMessage, send_mail
from django.template.loader import render_to_string
from django.urls import reverse
from django.core.mail import EmailMultiAlternatives
def send_email(context, message_text, message_html, subject, recipient_list):
message_text = render_to_string(message_text, context=context)
message_html = render_to_string(message_html, context=context)
return send_mail(
subject,
message_text,
os.getenv("EMAIL_HOST_USER"),
recipient_list,
fail_silently=False,
html_message=message_html,
)
def send_email_with_attachment(context, message_text, message_html, subject, recipient_list, weekly_orders_csv):
from_email = os.getenv("EMAIL_HOST_USER")
message_text = render_to_string(message_text, context=context)
message_html = render_to_string(message_html, context=context)
msg = EmailMultiAlternatives(subject, message_text, from_email, recipient_list)
msg.attach_alternative(message_html, "text/html")
msg.attach_file(weekly_orders_csv)
return msg.send()
def get_login_url():
domain = Site.objects.get_current().domain
user_login_path = reverse('login')
user_login_url = f'http://{domain}{user_login_path}'
return user_login_url
def get_admin_login_url():
domain = Site.objects.get_current().domain
admin_login_path = reverse('admin:index')
admin_login_url = f'http://{domain}{admin_login_path}'
return admin_login_url
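# Hedged usage sketch (added; the template paths and recipient address are
# placeholders, not files guaranteed to exist in this project):
#
#   send_email(
#       context={'login_url': get_login_url()},
#       message_text='main/email.txt',
#       message_html='main/email.html',
#       subject='Your weekly menu is ready',
#       recipient_list=['subscriber@example.com'],
#   )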
```
#### File: Good_Life_Meal_Prep_Subscribers_Page/main/views.py
```python
from django.shortcuts import render
from django.views.generic.base import TemplateView
from django.utils.decorators import method_decorator
from .decorators import unauthenticated_user
from .forms import CustomAuthenticationForm, ContactForm
from django.contrib.auth.views import (LoginView, LogoutView)
from django.contrib.messages.views import SuccessMessageMixin
from django.views.generic.edit import FormView
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin
from users.models import User
class HomePageView(TemplateView):
"""
The homepage view that just includes a signup button and some explanatory text.
"""
template_name = "main/home.html"
@method_decorator(unauthenticated_user) #If user is already authenticated they will be redirected to their subscription page
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class UserLoginView(LoginView):
"""
The login view for unauthenticated users. If a user is already authenticated,
they will be redirected to the menu page.
"""
form_class = CustomAuthenticationForm
template_name = "registration/login.html"
@method_decorator(unauthenticated_user) #If user is already authenticated they will be redirected to their subscription page
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class UserLogoutView(LogoutView):
"""
A generic logout view that redirects users to the homepage after logging them out.
"""
next_page = 'home'
class ContactFormView(SuccessMessageMixin, LoginRequiredMixin, FormView):
template_name = "main/contact_form.html"
form_class = ContactForm
success_url = reverse_lazy('contact_form')
success_message = "Thank you for contacting us. We value your feedback. Somebody will reply within 24 hours."
def form_valid(self, form):
current_user = User.objects.get(email=self.request.user)
form.send_message(current_user)
return super().form_valid(form)
```
#### File: Good_Life_Meal_Prep_Subscribers_Page/meals/admin.py
```python
from django.contrib import admin
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ngettext
from .models import Meal
from users.models import User
from orderitems.models import OrderItem
class MealAdmin(admin.ModelAdmin):
actions = [
'add_meals_to_menu',
'remove_meals_from_menu',
'set_menu_sort_order_to_zero',
]
def add_meals_to_menu(self, request, queryset):
total = 0
all_customers = User.objects.all()
for meal in queryset:
meal.is_on_menu = True
meal.save()
total+=1
for customer in all_customers:
try:
current_orderItem = OrderItem.objects.get(user=customer, item=meal)
current_orderItem.is_on_current_menu = True
current_orderItem.save()
except ObjectDoesNotExist:
OrderItem.objects.create(user=customer, item=meal, is_on_current_menu=True)
self.message_user(request, ngettext(
'%d meal was successfully added to the menu.',
'%d meals were successfully added to the menu.',
total,
) % total, messages.SUCCESS)
def remove_meals_from_menu(self, request, queryset):
total = 0
for meal in queryset:
meal.is_on_menu = False
meal.save()
total +=1
all_orderItems_for_current_meal = OrderItem.objects.filter(item=meal)
for current_orderItem in all_orderItems_for_current_meal:
current_orderItem.is_on_current_menu = False
current_orderItem.quantity = 0
current_orderItem.save()
self.message_user(request, ngettext(
'%d meal was successfully removed from the menu.',
            '%d meals were successfully removed from the menu.',
total,
) % total, messages.SUCCESS)
def set_menu_sort_order_to_zero(self, request, queryset):
total = 0
for meal in queryset:
meal.menu_sort_order = 0
meal.save()
total += 1
self.message_user(request, ngettext(
            "%d meal's menu sort order was set to zero.",
            "%d meals' menu sort order was set to zero.",
total,
) % total, messages.SUCCESS)
list_display = ('name', 'is_on_menu', 'menu_sort_order', 'description', 'proteins', 'carbs', 'fats', 'calories')
list_filter = ('is_on_menu','on_last_weeks_menu')
field_display = ('name','description','proteins', 'carbs', 'fats', 'calories', 'menu_sort_order', 'large_picture_url', 'created_at', 'updated_at')
readonly_fields = ('created_at', 'updated_at', 'is_on_menu', 'on_last_weeks_menu')
search_fields = ('name',)
ordering = ('name',)
admin.site.register(Meal, MealAdmin)
```
#### File: management/commands/email_orders_if_friday.py
```python
import datetime
from django.core.management.base import BaseCommand
from orderitems.utils import emailWeeklyOrders, weekly_order_confirmation_email
class Command(BaseCommand):
    help = 'Emails the weekly orders to admins if it is Friday in the UTC timezone'
def add_arguments(self, parser):
parser.add_argument('--any_day', action='store_true', help='Sends email regardless of the day of the week')
def handle(self, *args, **options):
#Monday=0, Tuesday=1, Wednesday=2, Thursday=3, Friday=4, Saturday=5, Sunday=6
current_day_of_the_week = datetime.datetime.today().weekday()
if options['any_day']:
current_day_of_the_week = 4
if current_day_of_the_week == 4:
total_emails_sent, current_admins = emailWeeklyOrders()
print("{} order emails have been sent to this list of admins: {}".format(total_emails_sent, current_admins))
total_user_emails_sent, total_full_orders, total_empty_orders = weekly_order_confirmation_email()
print("{} total order confirmations have been sent".format(total_user_emails_sent))
print("{} users placed an order".format(total_full_orders))
print("{} users did not place an order".format(total_empty_orders))
else:
print("email_orders_if_friday was run, but today is not Friday so no emails were sent.")
```
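Since the Friday check lives inside the command, it can be exercised on any day with the `--any_day` flag. A short sketch of invoking it programmatically (a pattern suited to tests or a shell session, assuming a configured Django project):

```python
# Roughly equivalent to: python manage.py email_orders_if_friday --any_day
from django.core.management import call_command
call_command('email_orders_if_friday', '--any_day')
```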
#### File: Good_Life_Meal_Prep_Subscribers_Page/orderitems/models.py
```python
from django.db import models
from users.models import User
from meals.models import Meal
class OrderItem(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
item = models.ForeignKey(Meal, on_delete=models.CASCADE)
quantity = models.PositiveSmallIntegerField(default=0)
special_requests = models.TextField(blank=True)
is_on_current_menu = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
unique_together = (
("user", "item"),
)
def __str__(self):
return f"{self.user}'s order for {self.item}"
```
#### File: Good_Life_Meal_Prep_Subscribers_Page/users/forms.py
```python
import datetime
import string
from django import forms
from django.contrib.auth import (
authenticate, get_user_model, password_validation,
)
from django.contrib.auth.forms import _unicode_ci_compare, SetPasswordForm
from django.contrib.auth.password_validation import validate_password
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ValidationError
from django.core.mail import EmailMultiAlternatives
from django.core.validators import RegexValidator
from django.forms import ModelForm
from django.template import loader
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.utils.translation import gettext_lazy as _
from meals.models import Meal
from .models import User
from orderitems.models import OrderItem
UserModel = get_user_model()
years = []
for year in range(1900,2022):
years.append(year)
class UserSignUpForm(forms.ModelForm):
"""
A custom form used by UserSignUpView for when an unauthenticated user creates an account.
Widgets for each field are assigned so that a bootstrap class of 'form-control' could be added
to each widget. The date_of_birth field's input format is set as MM/DD/YYYY in the widget.
"""
password1 = forms.CharField(label='Password', widget=forms.PasswordInput(attrs={'class':'form-control'}))
password2 = forms.CharField(label='<PASSWORD>', widget=forms.PasswordInput(attrs={'class':'form-control'}))
date_of_birth = forms.DateField(label='Birthday', initial= datetime.date.today(), widget=forms.SelectDateWidget(years=years, attrs={'class': 'form-control'}))
class Meta:
model = User
fields = ('email','name', 'date_of_birth', 'phone_number', 'street_address', 'city', 'state', 'zip_code')
widgets = {
'email': forms.EmailInput(attrs={'class': 'form-control'}),
'name': forms.TextInput(attrs={'class': 'form-control'}),
'phone_number': forms.TextInput(attrs={'class': 'form-control'}),
'street_address': forms.TextInput(attrs={'class': 'form-control'}),
'city': forms.TextInput(attrs={'class': 'form-control'}),
'state': forms.Select(attrs={'class': 'form-control'}),
'zip_code': forms.TextInput(attrs={'class': 'form-control'}),
}
labels = {
'name': 'Full name'
}
def clean_email(self):
email = self.cleaned_data['email']
return email.lower()
# return email
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
validate_password(password2)
return password2
def save(self, *args, **kwargs):
user = super().save(commit=False)
user.set_password(self.cleaned_data["password2"])
user.save()
return user
class UserUpdateForm(forms.ModelForm):
"""
A custom form used by UserProfileUpdateView to allow authenticated users to update their profile
information. Widgets for each field are assigned so that a bootstrap class of 'form-control'
could be added to each widget. The date_of_birth field's input format is set as MM/DD/YYYY in the widget.
"""
class Meta:
model = User
fields = ('name', 'date_of_birth', 'phone_number', 'street_address', 'city', 'state', 'zip_code')
widgets = {
'name': forms.TextInput(attrs={'class': 'form-control'}),
'date_of_birth': forms.SelectDateWidget(years=years, attrs={'class': 'form-control'}),
'phone_number': forms.TextInput(attrs={'class': 'form-control'}),
'street_address': forms.TextInput(attrs={'class': 'form-control'}),
'city': forms.TextInput(attrs={'class': 'form-control'}),
'state': forms.Select(attrs={'class': 'form-control'}),
'zip_code': forms.TextInput(attrs={'class': 'form-control'}),
}
labels = {
'name': 'Full name'
}
#Copied from SetPasswordForm
class CustomUserSetPasswordForm(forms.Form):
"""
    A form that lets a user set their password without entering the old
    password. This was copied from SetPasswordForm, which is used (via PasswordChangeForm) by
    the generic PasswordChangeView. It was copied so that the bootstrap class 'form-control'
    could be added to each widget to allow for bootstrap styling.
"""
error_messages = {
'password_mismatch': _('The two password fields didn’t match.'),
}
new_password1 = forms.CharField(
label=_("New password"),
widget=forms.PasswordInput(attrs={'autocomplete': 'new-password', 'class': 'form-control'}),
strip=False,
help_text=password_validation.password_validators_help_text_html(),
)
new_password2 = forms.CharField(
label=_("New password confirmation"),
strip=False,
widget=forms.PasswordInput(attrs={'autocomplete': 'new-password', 'class': 'form-control'}),
)
def __init__(self, user, *args, **kwargs):
self.user = user
super().__init__(*args, **kwargs)
def clean_new_password2(self):
        password1 = self.cleaned_data.get('new_password1')
        password2 = self.cleaned_data.get('new_password2')
if password1 and password2:
if password1 != password2:
raise ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
password_validation.validate_password(password2, self.user)
return password2
def save(self, commit=True):
        password = self.cleaned_data["new_password1"]
self.user.set_password(password)
if commit:
self.user.save()
return self.user
#Copied from PasswordChangeForm
class CustomUserPasswordChangeForm(CustomUserSetPasswordForm):
"""
A form that lets a user change their password by entering their old
    password. This was copied from PasswordChangeForm, which is the form_class for the generic
PasswordChangeView. This was copied over so that the bootstrap class 'form-control' could be added
to each widget to allow for bootstrap styling.
"""
error_messages = {
**SetPasswordForm.error_messages,
'password_incorrect': _("Your old password was entered incorrectly. Please enter it again."),
}
old_password = forms.CharField(
label=_("Old password"),
strip=False,
widget=forms.PasswordInput(attrs={'autocomplete': 'current-password', 'autofocus': True, 'class':'form-control'}),
)
field_order = ['old_password', 'new_password1', 'new_password2']
def clean_old_password(self):
"""
Validate that the old_password field is correct.
"""
old_password = self.cleaned_data["old_password"]
if not self.user.check_password(old_password):
raise ValidationError(
self.error_messages['password_incorrect'],
code='password_incorrect',
)
return old_password
class CustomPasswordResetForm(forms.Form):
email = forms.EmailField(
label=_("Email"),
max_length=254,
widget=forms.EmailInput(attrs={'autocomplete': 'email', 'class': 'form-control'})
)
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email, html_email_template_name=None):
"""
Send a django.core.mail.EmailMultiAlternatives to `to_email`.
"""
subject = loader.render_to_string(subject_template_name, context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
body = loader.render_to_string(email_template_name, context)
email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
if html_email_template_name is not None:
html_email = loader.render_to_string(html_email_template_name, context)
email_message.attach_alternative(html_email, 'text/html')
email_message.send()
def get_users(self, email):
"""Given an email, return matching user(s) who should receive a reset.
This allows subclasses to more easily customize the default policies
that prevent inactive users and users with unusable passwords from
resetting their password.
"""
email_field_name = UserModel.get_email_field_name()
active_users = UserModel._default_manager.filter(**{
'%s__iexact' % email_field_name: email,
'is_active': True,
})
return (
u for u in active_users
if u.has_usable_password() and
_unicode_ci_compare(email, getattr(u, email_field_name))
)
def save(self, domain_override=None,
subject_template_name='registration/password_reset_subject.txt',
email_template_name='registration/password_reset_email.html',
use_https=False, token_generator=default_token_generator,
from_email=None, request=None, html_email_template_name=None,
extra_email_context=None):
"""
Generate a one-use only link for resetting password and send it to the
user.
"""
email = self.cleaned_data["email"]
if not domain_override:
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
else:
site_name = domain = domain_override
email_field_name = UserModel.get_email_field_name()
for user in self.get_users(email):
user_email = getattr(user, email_field_name)
context = {
'email': user_email,
'domain': domain,
'site_name': site_name,
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'user': user,
'token': token_generator.make_token(user),
'protocol': 'https' if use_https else 'http',
**(extra_email_context or {}),
}
self.send_mail(
subject_template_name, email_template_name, context, from_email,
user_email, html_email_template_name=html_email_template_name,
)
class CustomPasswordSetForm(forms.Form):
"""
    A form that lets a user set their password without entering the old
password
"""
error_messages = {
'password_mismatch': _('The two password fields didn’t match.'),
}
new_password1 = forms.CharField(
label=_("New password"),
widget=forms.PasswordInput(attrs={'autocomplete': 'new-password', 'class': 'form-control'}),
strip=False,
# help_text=password_validation.password_validators_help_text_html(),
)
new_password2 = forms.CharField(
label=_("New password confirmation"),
strip=False,
widget=forms.PasswordInput(attrs={'autocomplete': 'new-password', 'class': 'form-control'}),
)
def __init__(self, user, *args, **kwargs):
self.user = user
super().__init__(*args, **kwargs)
def clean_new_password2(self):
        password1 = self.cleaned_data.get('new_password1')
        password2 = self.cleaned_data.get('new_password2')
        if password1 and password2:
            if password1 != password2:
raise ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
password_validation.validate_password(password2, self.user)
return password2
def save(self, commit=True):
        password = self.cleaned_data["new_password1"]
self.user.set_password(password)
if commit:
self.user.save()
return self.user
```
#### File: Good_Life_Meal_Prep_Subscribers_Page/users/models.py
```python
from django.db import models
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser, PermissionsMixin
)
from phonenumber_field.modelfields import PhoneNumberField
class UserManager(BaseUserManager):
def create_user(self, email, name, date_of_birth, phone_number, password=None):
"""
Creates and saves a User with the given email, date of
birth and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
name=name,
date_of_birth=date_of_birth,
phone_number=phone_number,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, date_of_birth, phone_number, password=None):
"""
Creates and saves a superuser with the given email, date of
birth and password.
"""
user = self.create_user(
email,
name=name,
password=password,
date_of_birth=date_of_birth,
phone_number=phone_number,
)
user.is_admin = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
NEVADA = 'NV'
states = [
(NEVADA, 'Nevada'),
]
email = models.EmailField(
verbose_name='email address',
max_length=255,
unique=True,
)
name = models.CharField(max_length=255)
date_of_birth = models.DateField()
phone_number = PhoneNumberField()
street_address = models.CharField(max_length=255)
city = models.CharField(max_length=255)
state = models.CharField(max_length=255, choices=states)
zip_code = models.CharField(max_length=6)
remaining_meals = models.PositiveSmallIntegerField(default=40)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name', 'date_of_birth', 'phone_number']
def __str__(self):
return self.email
# def has_perm(self, perm, obj=None):
# "Does the user have a specific permission?"
# # Simplest possible answer: Yes, always
# return True
# def has_module_perms(self, app_label):
# "Does the user have permissions to view the app `app_label`?"
# # Simplest possible answer: Yes, always
# return True
@property
def is_staff(self):
"Is the user a member of staff?"
# Simplest possible answer: All admins are staff
return self.is_admin
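# Added illustration (not part of the original file): a custom user model like this
# only takes effect if the project's settings point at it. Assuming the app holding
# this model is installed as "users", the settings.py wiring would be:
#
#     AUTH_USER_MODEL = 'users.User'
#
# Accounts are then created through the manager defined above, for example
# (all values below are hypothetical):
#
#     User.objects.create_user(
#         email='jane@example.com', name='Jane Doe',
#         date_of_birth='1990-01-01', phone_number='+17025550100', password='...')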
``` |
{
"source": "jmhubbard/quote_of_the_day_custom_user",
"score": 2
} |
#### File: quote_of_the_day_custom_user/emails/utils.py
```python
import os
from django.core.mail import send_mail
from django.urls import reverse
from django.contrib.sites.models import Site
from django.template.loader import render_to_string
def email_all_users_an_email(user, showlist):
#Gets the current domain name
domain = Site.objects.get_current().domain
# reverse a url in a view to get the path after the domain
path = reverse('login')
url = 'http://{domain}{path}'.format(domain=domain, path=path)
context = {
"unsubscribe_uri": url,
"showlist": showlist,
}
message_text = render_to_string("emails/email_all_users.txt", context=context)
message_html = render_to_string("emails/email_all_users.html", context=context)
return send_mail(
"New Shows Added",
message_text,
os.getenv("EMAIL_HOST_USER"),
[user],
fail_silently=False,
html_message=message_html,
)
def email_test(user, message):
send_mail(
'Quote test',
message,
os.getenv("EMAIL_HOST_USER"),
[user],
fail_silently=False,
)
def email_daily_tv_quote(quote, user):
#Gets the current domain name
domain = Site.objects.get_current().domain
# reverse a url in a view to get the path after the domain
path = reverse('login')
url = 'http://{domain}{path}'.format(domain=domain, path=path)
context = {
"unsubscribe_uri": url,
"quote": quote,
}
message_text = render_to_string("emails/tv_email.txt", context=context)
message_html = render_to_string("emails/tv_email.html", context=context)
return send_mail(
"Quote Of The Day",
message_text,
os.getenv("EMAIL_HOST_USER"),
[user],
fail_silently=False,
html_message=message_html,
)
def email_daily_movie_quote(quote, user):
#Gets the current domain name
domain = Site.objects.get_current().domain
# reverse a url in a view to get the path after the domain
path = reverse('login')
url = 'http://{domain}{path}'.format(domain=domain, path=path)
context = {
"unsubscribe_uri": url,
"quote": quote,
}
message_text = render_to_string("emails/movie_email.txt", context=context)
message_html = render_to_string("emails/movie_email.html", context=context)
return send_mail(
"Quote Of The Day",
message_text,
os.getenv("EMAIL_HOST_USER"),
[user],
fail_silently=False,
html_message=message_html,
)
```
#### File: quote_of_the_day_custom_user/emails/views.py
```python
from django.shortcuts import render
from emails.forms import ContactForm
from django.shortcuts import redirect
from django.views.generic.edit import FormView
from django.urls import reverse_lazy
from django.contrib.messages.views import SuccessMessageMixin
class ContactView(SuccessMessageMixin, FormView):
template_name = 'emails/contactform.html'
form_class = ContactForm
success_url = reverse_lazy('contactform')
success_message = 'Your message was sent'
def form_valid(self, form):
form.send_message()
return super().form_valid(form)
```
#### File: management/commands/full_quote_loader_original.py
```python
from django.core.management.base import BaseCommand
from quotes.models import Quote
from shows.models import Character, Episode, Show
from quotes.quotelist_withoutmovies import quotelistdict
from django.db import IntegrityError
class Command(BaseCommand):
    help = 'Creates quotes, shows, characters, and episodes from quotes.quotelist_withoutmovies.quotelistdict'
def add_arguments(self, parser):
parser.add_argument('--save', action='store_true', help='Save quotes, shows, characters, and episodes to the database')
def handle(self, *args, **options):
totalAttemptedItems = 0
#Show Counts
savedShowCount = 0
duplicateShows = 0
#Character Counts
savedCharacterCount = 0
duplicateCharacters = 0
#Episode Counts
savedEpisodeCount = 0
duplicateEpisodes = 0
#Quote Counts
savedQuoteCount = 0
duplicateQuotes = 0
for item in quotelistdict:
totalAttemptedItems += 1
#Create an instance of Show using the provided show name
show = Show(
name = item["show"],
is_active = False,
)
if options["save"]:
try:
#Save the show instance if it doesn't exist in database
show.save()
except IntegrityError:
#If the show already exists then get that shows object to use when saving episodes
duplicateShows += 1
show = Show.objects.get(name=item["show"])
else:
savedShowCount += 1
finally:
#Create an instance of Episode using the provided episode name, season and the previous show instance
episode = Episode(
name = item["episode"],
season = item["season"],
show = show
)
try:
episode.save()
except IntegrityError:
#If the episode already exists then get that episodes object to use when saving character
duplicateEpisodes += 1
episode = Episode.objects.get(
name=item["episode"],
season=item["season"],
show=show
)
else:
savedEpisodeCount += 1
finally:
#Create an instance of Character using the provided first name, last name, and previous show object
character = Character(
first_name = item["first_name"],
last_name = item["last_name"],
show = show
)
try:
character.save()
except IntegrityError:
#If the Character already exists then get that character object to use when saving quote
duplicateCharacters += 1
character = Character.objects.get(
first_name = item["first_name"],
last_name = item["last_name"],
show = show
)
else:
savedCharacterCount += 1
finally:
#Create an instance of Quote using the provided quote text, the previous character, and the previous episode
quote = Quote(
text = item["quote"],
speaker = character,
episode = episode
)
try:
quote.save()
except IntegrityError:
duplicateQuotes += 1
# continue
else:
savedQuoteCount += 1
print(f'Total attempted items: {totalAttemptedItems}')
#Final Show Counts
print(f'Total saved shows: {savedShowCount}')
print(f'Skipped {duplicateShows} duplicated shows')
#Final Character Counts
print(f'Total saved characters: {savedCharacterCount}')
print(f'Skipped {duplicateCharacters} duplicated characters')
#Final Episode Counts
print(f'Total saved episodes: {savedEpisodeCount}')
print(f'Skipped {duplicateEpisodes} duplicated episodes')
#Final Quote Counts
print(f'Total saved quotes: {savedQuoteCount}')
print(f'Skipped {duplicateQuotes} duplicated quotes')
```
#### File: management/commands/quote_emailer.py
```python
from django.core.management.base import BaseCommand
from quotes.models import Quote
from shows.models import Character, Episode, Show
from users.models import User
from subscriptions.models import Subscription
import random
from emails.utils import email_test
from emails.models import EmailTracker
from datetime import date, timedelta, datetime
from emails.utils import email_daily_tv_quote, email_daily_movie_quote
from django.db import IntegrityError
class Command(BaseCommand):
help = 'Sends out a daily random quote that the user is subscribed to'
# def add_arguments(self, parser):
# parser.add_argument('--save', action='store_true', help='Save quotes, shows, characters, and episodes to the database')
def handle(self, *args, **options):
#Filters show objects to get current active shows and puts the show names in an array labeled show_list. That array is then randomly
        #shuffled to change the order in which shows are iterated through.
active_shows = Show.objects.filter(is_active = True)
show_list = []
for item in active_shows:
show_list.append(item.name)
random.shuffle(show_list)
        #For each show in show_list, all quotes for that show are filtered from all quotes, and one is chosen at random.
for show in show_list:
show_object = Show.objects.get(name = show)
if show_object.category == 2:
show_quotes = Quote.objects.filter(episode__show__name = show)
random_quote = random.choice(show_quotes)
                #Users are filtered by those that are subscribed to the current show.
current_subscribers = User.objects.filter(subscription__show__name = show, subscription__status = 1)
for user in current_subscribers:
today_date = date.today()
                    #Get the date/time of the user's last quote email sent through quote_emailer
                    users_last_email = EmailTracker.objects.get(user = user)
                    #Will send a quote if the person hasn't received one yet. This ensures that they only receive one quote and not one for every show they are subscribed to
if users_last_email.last_quote_email.date() != today_date:
# email_daily_tv_quote(quote_email, user.email)
email_daily_tv_quote(random_quote, user.email)
users_last_email.last_quote_email = datetime.now()
users_last_email.save()
elif show_object.category == 1:
show_quotes = Quote.objects.filter(speaker__show__name = show)
random_quote = random.choice(show_quotes)
current_subscribers = User.objects.filter(subscription__show__name = show, subscription__status = 1)
for user in current_subscribers:
today_date = date.today()
                    #Get the date/time of the user's last quote email sent through quote_emailer
                    users_last_email = EmailTracker.objects.get(user = user)
                    #Will send a quote if the person hasn't received one yet. This ensures that they only receive one quote and not one for every show they are subscribed to
if users_last_email.last_quote_email.date() != today_date:
# email_daily_tv_quote(quote_email, user.email)
email_daily_movie_quote(random_quote, user.email)
users_last_email.last_quote_email = datetime.now()
users_last_email.save()
```
#### File: management/commands/show_loader.py
```python
from django.core.management.base import BaseCommand
from quotes.models import Episode, Show
from quotes.quotelist import quotelistdict
from django.db import IntegrityError
from django.core.exceptions import ObjectDoesNotExist
class Command(BaseCommand):
    help = 'Loads shows from quotes.quotelist.quotelistdict'
def add_arguments(self, parser):
parser.add_argument('--save', action='store_true', help='Save shows to database')
def handle(self, *args, **options):
savedCount = 0
duplicateCount = 0
totalattempteditems = 0
for item in quotelistdict:
totalattempteditems += 1
show = Show(
name=item["show"],
is_active=False
)
if options["save"]:
try:
show.save()
except IntegrityError:
duplicateCount += 1
continue
else:
savedCount += 1
print(f'Total attempted shows: {totalattempteditems}')
print(f"Skipped {duplicateCount} duplicate shows.")
print(f"Saved: {savedCount} shows.")
print(f"Total: {Show.objects.count()} shows in database.")
```
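The `try`/`except IntegrityError` with `else` used by these loader commands is a hand-rolled idempotent load. A sketch of the same step written with Django's built-in `get_or_create` is shown below as an alternative design, reusing the `item`, `savedCount`, and `duplicateCount` names from the loop above:

```python
# Alternative sketch of the loader body: get_or_create instead of catching IntegrityError.
show, created = Show.objects.get_or_create(
    name=item["show"],
    defaults={"is_active": False},
)
if created:
    savedCount += 1
else:
    duplicateCount += 1
```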
#### File: quote_of_the_day_custom_user/shows/utils.py
```python
from shows.models import Show
#Returns a QuerySet of only active shows
def getActiveShows():
return Show.objects.filter(is_active = True)
```
#### File: quote_of_the_day_custom_user/subscriptions/models.py
```python
from django.db import models
import users.models
import shows.models
from django import forms
from django.utils.translation import gettext as _
def subscribe_user_on_creation(user, shows):
for show in shows:
subscription = Subscription()
subscription.user = user
subscription.show = show
subscription.save()
subscription_choices = (
('Subscribed', 'Subscribed'),
('Unsubscribed', 'Unsubscribed')
)
class Subscription(models.Model):
STATUS_CHOICE_UNKNOWN = 0
STATUS_CHOICE_SUBSCRIBED = 1
STATUS_CHOICE_UNSUBSCRIBED = 2
STATUS_CHOICES = (
(STATUS_CHOICE_UNKNOWN, _('Unknown')),
(STATUS_CHOICE_SUBSCRIBED, _('Subscribed')),
(STATUS_CHOICE_UNSUBSCRIBED, _('Unsubscribed')),
)
user = models.ForeignKey('users.User', on_delete=models.CASCADE)
show = models.ForeignKey('shows.Show', on_delete=models.CASCADE)
status = models.IntegerField(
choices = STATUS_CHOICES,
default = STATUS_CHOICE_UNSUBSCRIBED,
)
class Meta:
unique_together = (
("user", "show"),
)
def get_absolute_url(self):
from django.urls import reverse
return reverse("subscription-update", args=[self.id])
def __str__(self):
return (f'{self.user} {self.show} {self.status}')
```
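The integer `status` field is what the quote mailer filters on (`subscription__status = 1`). A small query sketch using the named constant instead of the bare literal; the show name is a placeholder:

```python
# Find the users subscribed to a given show (mirrors the filter in quote_emailer.py).
from users.models import User
subscribers = User.objects.filter(
    subscription__show__name="The Office",  # hypothetical show name
    subscription__status=Subscription.STATUS_CHOICE_SUBSCRIBED,
)
```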
#### File: quote_of_the_day_custom_user/users/models.py
```python
from django.db import models
from django.contrib.auth.models import AbstractUser, UserManager
from django.utils.crypto import get_random_string
def make_random_username(length, allowed_chars):
import string
return get_random_string(length=length, allowed_chars=allowed_chars)
class UserManager(UserManager):
def create_user(self, email=None, password=None, **extra_fields):
username = self.make_random_username()
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(username, email, password, **extra_fields)
    def create_superuser(self, email=None, password=None, **extra_fields):
username = self.make_random_username()
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(username, email, password, **extra_fields)
def make_random_username(self):
import string
return make_random_username(length=14, allowed_chars=string.ascii_lowercase)
class User(AbstractUser):
email = models.EmailField(
verbose_name='email address',
unique=True,
)
objects = UserManager()
# EMAIL_FIELD = 'email'
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
def __str__(self):
return self.email
```
#### File: quote_of_the_day_custom_user/users/views.py
```python
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from .models import User
from .forms import UserForm
from django.contrib.messages.views import SuccessMessageMixin
from main.decorators import unauthenticated_user
from django.utils.decorators import method_decorator
from django.views.generic.edit import DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.views.generic.base import TemplateView
from django.contrib.auth.decorators import login_required
class UserDeleteView(LoginRequiredMixin, DeleteView):
model = User
success_url = reverse_lazy('home')
def get_object(self, queryset=None):
"""
Return the object the view is displaying.
Require `self.queryset` and a `pk` or `slug` argument in the URLconf.
Subclasses can override this to return any object.
"""
# Use a custom queryset if provided; this is required for subclasses
# like DateDetailView
if queryset is None:
queryset = self.get_queryset()
# Next, try looking up by primary key.
pk = self.kwargs.get(self.pk_url_kwarg)
slug = self.kwargs.get(self.slug_url_kwarg)
if pk is not None:
queryset = queryset.filter(pk=pk)
# Next, try looking up by slug.
if slug is not None and (pk is None or self.query_pk_and_slug):
slug_field = self.get_slug_field()
queryset = queryset.filter(**{slug_field: slug})
# If none of those are defined, it's an error.
if pk is None and slug is None:
raise AttributeError(
"Generic detail view %s must be called with either an object "
"pk or a slug in the URLconf." % self.__class__.__name__
)
try:
# Get the single item from the filtered queryset
obj = queryset.get()
except queryset.model.DoesNotExist:
raise Http404(("No %(verbose_name)s found matching the query") %
{'verbose_name': queryset.model._meta.verbose_name})
current_user = self.request.user
if current_user != obj:
raise PermissionDenied
return obj
class UserCreate(SuccessMessageMixin, CreateView):
model = User
form_class = UserForm
success_url = "/accounts/login/"
success_message = "Account was successfully created."
@method_decorator(unauthenticated_user) #If user is already authenticated they will be redirected to their subscription
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
# class UserAccountView(LoginRequiredMixin, TemplateView):
# template_name = "users/account_page.html"
@login_required()
def useraccountview(request):
current_user = request.user
# subscriptions = Subscription.objects.filter(user = current_user)
context ={
'current_user': current_user,
}
return render(request, 'users/account_page.html', context)
``` |
{
"source": "jmhuer/computer-vision-framework",
"score": 3
} |
#### File: jmhuer/computer-vision-framework/main.py
```python
from opts.opts import Options
from webcam import main_cam
def pretty_print(title, msg, size=24):
''' Make print prettier. '''
print(size * "-")
print(("{:^" + str(size) + "}").format(title))
print(size * "-")
print(f"{msg}\n")
if __name__ == "__main__":
# Get options - Inspired by <NAME>
opt = Options().opts
# Print loaded options
pretty_print("OPTIONS", opt)
# Run models in webcam
if opt.webcam:
main_cam(opt)
``` |
{
"source": "jmhuer/DJITelloAutonomy2",
"score": 3
} |
#### File: DJITelloAutonomy2/common/cytron_motor.py
```python
import serial
class Cytron_Motor:
def __init__(self, baud):
self.ser = serial.Serial(
#port='COM3', #uncomment for windows
port='/dev/ttyUSB0',
baudrate=baud
)
# setspeed: sets the speed of the robot given the desired speed as percentage.
# robot continues to move unless
# stop is called.
# inputs:
    #   speed: a number from -100 to 100 denoting the speed of the robot as a
    #          percentage; negative values drive the robot backward.
def setspeed(self, speed):
#record speed for future uses
self.prevSpeed = speed
#1 is forward and 0 is backward
direction = 1 if speed >= 0 else 0
#calculate the speed section
speedBits = int(63 * (abs(speed)/100))
if direction:
leftMotor = 0b00000000
rightMotor = 0b10000000
else:
leftMotor = 0b01000000
rightMotor = 0b11000000
#set final byte values for both motors
leftMotor = leftMotor | speedBits
rightMotor = rightMotor | speedBits
#construct bytearray
arr = bytearray()
arr.append(leftMotor)
arr.append(rightMotor)
self.ser.write(arr)
arr.clear()
#stop the robot from moving
def stop(self):
leftMotor = 0b00000000
rightMotor = 0b10000000
#construct bytearray
arr = bytearray()
arr.append(leftMotor)
arr.append(rightMotor)
self.ser.write(arr)
arr.clear()
# turnRight: turns the robot towards the right side given a turning speed.
# the turn is a rotation and not a steer.
# robot continues to turn/rotate unless
# stop is called.
# inputs:
# speed: a number from 0-100 denoting the turning speed of the robot as a
# percentage.
def turnRight(self, speed):
#input correction
if speed > 100:
speed = 100
if speed < 0:
speed = 0
#record speed for future uses
self.prevSpeed = speed
        #1 rotates right (clockwise), 0 rotates left
direction = 1 if speed >= 0 else 0
#calculate the speed section
speedBits = int(63 * (abs(speed)/100))
if direction:
leftMotor = 0b00000000
rightMotor = 0b11000000
else:
leftMotor = 0b01000000
rightMotor = 0b10000000
#set final byte values for both motors
leftMotor = leftMotor | speedBits
rightMotor = rightMotor | speedBits
#construct bytearray
arr = bytearray()
arr.append(leftMotor)
arr.append(rightMotor)
self.ser.write(arr)
arr.clear()
# turnLeft: turns the robot towards the left side given a turning speed.
# the turn is a rotation and not a steer.
# robot continues to turn/rotate unless
# stop is called.
# inputs:
# speed: a number from 0-100 denoting the turning speed of the robot as a
# percentage.
def turnLeft(self, speed):
#input correction
if speed > 100:
speed = 100
if speed < 0:
speed = 0
#record speed for future uses
self.prevSpeed = speed
        #1 rotates left (counter-clockwise), 0 rotates right
direction = 1 if speed >= 0 else 0
#calculate the speed section
speedBits = int(63 * (abs(speed)/100))
if direction:
leftMotor = 0b01000000
rightMotor = 0b10000000
else:
leftMotor = 0b00000000
rightMotor = 0b11000000
#set final byte values for both motors
leftMotor = leftMotor | speedBits
rightMotor = rightMotor | speedBits
#construct bytearray
arr = bytearray()
arr.append(leftMotor)
arr.append(rightMotor)
self.ser.write(arr)
arr.clear()
def steerMove(self, speed, steer):
#input correction
if speed > 100:
speed = 100
if speed < -100:
speed = -100
if steer > 100:
steer = 100
if steer < -100:
steer = -100
#record speed for future uses
self.prevSpeed = speed
self.prevSteer = steer
leftMotorSpeed = speed
rightMotorSpeed = speed
absSteer = abs(steer)
#calculate left and right motor speeds based on steer
if steer >= 0:
leftMotorSpeed = leftMotorSpeed + (absSteer / 2)
rightMotorSpeed = rightMotorSpeed - (absSteer/2)
else:
leftMotorSpeed = leftMotorSpeed - (absSteer / 2)
rightMotorSpeed = rightMotorSpeed + (absSteer/2)
#value limit correction
if leftMotorSpeed > 100:
leftMotorSpeed = 100
#elif leftMotorSpeed < 0:
#leftMotorSpeed = 0
if rightMotorSpeed > 100:
rightMotorSpeed = 100
#elif rightMotorSpeed < 0:
# rightMotorSpeed = 0
        #1 is forward and 0 is backward
direction = 1 if speed >= 0 else 0
#calculate the speed section for both motors
speedBitsRight = int(63 * (abs(rightMotorSpeed)/100))
speedBitsLeft = int(63 * (abs(leftMotorSpeed)/100))
if (rightMotorSpeed * leftMotorSpeed) < 0:
if rightMotorSpeed > 0:
leftMotor = 0b00000000
rightMotor = 0b11000000
else:
leftMotor = 0b01000000
rightMotor = 0b10000000
elif direction:
leftMotor = 0b00000000
rightMotor = 0b10000000
else:
leftMotor = 0b01000000
rightMotor = 0b11000000
#set final byte values for both motors
leftMotor = leftMotor | speedBitsLeft
rightMotor = rightMotor | speedBitsRight
#construct bytearray
arr = bytearray()
arr.append(leftMotor)
arr.append(rightMotor)
self.ser.write(arr)
arr.clear()
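# Added worked example (not part of the driver): the command byte layout used above.
# Bit 7 selects the motor (0 = left, 1 = right), bit 6 selects direction, and
# bits 0-5 carry the scaled speed (0-63).
if __name__ == "__main__":
    speed_percent = 50
    speed_bits = int(63 * (abs(speed_percent) / 100))           # -> 31 == 0b011111
    left_forward = 0b00000000 | speed_bits                      # 0b00011111
    right_forward = 0b10000000 | speed_bits                     # 0b10011111
    print("{:08b} {:08b}".format(left_forward, right_forward))  # 00011111 10011111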
``` |
{
"source": "jmhuer/TCN",
"score": 2
} |
#### File: TCN/Dictionary_learning/my_tests.py
```python
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from TCN.tcn import TemporalBlock, TemporalConvNet
torch.manual_seed(42)
from kwta import Sparsify1D_kactive
from synthetic_data import create_synthetic_data
# class TCN(nn.Module):
# def __init__(self, input_size, output_size, num_channels, kernel_size, dropout):
# super(TCN, self).__init__()
# self.encoder = TemporalConvNet(input_size, num_channels, kernel_size, dropout=dropout)
# kernel_size, padding, stride, dilation = self.encoder.network[-1].conv1.kernel_size, self.encoder.network[-1].conv1.padding, self.encoder.network[-1].conv1.stride, self.encoder.network[-1].conv1.dilation
# print(kernel_size, padding, stride )
# in_channels, out_channels = self.encoder.network[-1].conv1.in_channels, self.encoder.network[-1].conv1.out_channels
# print("in", in_channels, out_channels)
# self.decoder = torch.nn.ConvTranspose1d(in_channels=num_channels[-1], out_channels=1, kernel_size=5, padding=0, dilation=1, stride=1)
#
# def forward(self, x):
# # x needs to have dimension (N, C, L) in order to be passed into CNN
# output = self.encoder(x.transpose(1, 2))
# print("~~~~~~~~out size ", output.size())
# output = self.decoder(output).double().transpose(1, 2)
# return output
synth_data = create_synthetic_data(size = 5000)
class autoencoder(nn.Module):
def __init__(self, input_size, output_size, num_channels, kernel_size, dropout, wta_k):
super(autoencoder, self).__init__()
self.wta = Sparsify1D_kactive(k = wta_k)
self.feature = TemporalConvNet(input_size, num_channels, kernel_size, dropout=dropout)
# self.encoder = torch.nn.Conv1d(in_channels=5, out_channels=10, kernel_size=5, padding=0, bias=False, stride=5)
# self.decoder = torch.nn.ConvTranspose1d(in_channels=10, out_channels=1, kernel_size=5, padding=0, bias=False, stride=5)
# self.encoder.weight.data.fill_(0.3)
# self.code = None
# def get_kernels(self):
# return self.decoder.weight.data[:,0,:]
# def feature_map(self, x):
# code = self.wta(self.encoder(x))
# return code
def forward(self, x):
# x needs to have dimension (N, C, L) in order to be passed into CNN
output = self.feature(x).double()
# print("~~~~~~~~feature size ", output.size())
# self.code = self.wta(self.encoder(output))
# # print("~~~~~~~~code size ", code.size())
# output = self.decoder(self.code ).double()
return output
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
print("Using device: ", device)
model = autoencoder(input_size=1, output_size=400, num_channels=[1, 15, 25], kernel_size=5, dropout=0.2, wta_k = 5).to(device)
inputs = torch.tensor(synth_data[:,None,:]).float().to(device)
print("Input size: ", inputs.size())
out = model(inputs)
print("Output size: ", out.size(), "\n")
loss_fn = torch.nn.L1Loss().to(device)
optimizer = optim.SGD(model.parameters(), lr=.05, weight_decay = 0.00001, momentum=0.05) ##this has weight decay just like you implemented
epochs = 3000
history = {"loss": []}
for i in range(epochs):
optimizer.zero_grad()
output = model(inputs)
#decaying WTA
if i % 500 == 0 and i != 0:
model.wta.k = max(1, model.wta.k - 1)
print("model.wta.k: ", model.wta.k)
loss = loss_fn(output, inputs)
loss.backward()
optimizer.step()
history["loss"].append(float(loss))
if i % 1 == 0:
print("Epoch : {} \t Loss : {} \t ".format(i, round(float(loss),7)))
# print("\nneg encoder ", float((model.encoder.weight.ravel() < 0).sum(dim=0)))
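# Added sketch (an assumption about the unshown kwta module, not its actual code):
# a k-winners-take-all layer such as Sparsify1D_kactive typically keeps the k largest
# activations along the last dimension and zeroes the rest, which is what the decaying
# model.wta.k schedule above relies on.
class KWinnersTakeAllSketch(nn.Module):
    def __init__(self, k):
        super().__init__()
        self.k = k
    def forward(self, x):
        topk_idx = x.topk(self.k, dim=-1).indices               # indices of the k largest values
        mask = torch.zeros_like(x).scatter_(-1, topk_idx, 1.0)  # 1.0 at winners, 0.0 elsewhere
        return x * mask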
``` |
{
"source": "jmhull/genz-utils",
"score": 2
} |
#### File: blueprints/help/blueprint.py
```python
import flask
import requests as HTTP_REQUESTS
import socket
from pdb import set_trace
import flask_fat
Journal = self = flask_fat.Journal(__file__)
""" ----------------------- ROUTES --------------------- """
@Journal.BP.route('/help', methods=['GET'])
def register_external_api():
global Journal
app = Journal.mainapp.app
routes = []
for rule in app.url_map.iter_rules():
if rule.endpoint == 'static':
continue
route = { rule.rule : list(rule.methods) }
routes.append(route)
return flask.make_response(flask.jsonify(routes), 200)
```
#### File: blueprints/subscribe/blueprint.py
```python
import json
import logging
import os
import uuid
from datetime import datetime
from pdb import set_trace
from pprint import pprint
import flask
import jsonschema
import requests as HTTP_REQUESTS
from blueprints.resource.blueprint import send_resource
import flask_fat
Journal = self = flask_fat.Journal(__file__)
""" ------------------------------- ROUTES ------------------------------- """
@Journal.BP.route('/%s/add_event' % (Journal.name), methods=['POST'])
def add_subscribe():
"""
Subscribe to an Add event.
"""
response = {}
status = 'nothing'
code = 200
body = flask.request.get_json()
if not body:
body = flask.request.form
callback_endpoint = body.get('callback', None)
endpoint_alias = body.get('alias', None)
bridges = body.get('bridges', [])
if not endpoint_alias:
endpoint_alias = callback_endpoint
if callback_endpoint is None:
response['error'] = 'No callback in body!\n%s' % body
status = 'error'
code = 400
elif len(bridges) == 0:
response['error'] = 'No bridges in body!\n%s' % body
status = 'error'
code = 400
else:
if endpoint_alias in Journal.mainapp.add_callback:
status = 'Endpoint alias "%s" already in the list.' % endpoint_alias
code = 403
elif callback_endpoint in Journal.mainapp.add_callback.values():
status = 'Endpoint "%s" already in the list.' % callback_endpoint
code = 403
else:
status = 'Callback endpoint %s added' % callback_endpoint
if endpoint_alias != callback_endpoint:
status = '%s with the alias name "%s"' % (status, endpoint_alias)
Journal.mainapp.add_callback[endpoint_alias] = callback_endpoint
for br in bridges:
Journal.mainapp.add_callback[br] = callback_endpoint
try:
add = Journal.mainapp.conf.add[br]
except KeyError:
logging.debug('bridge {} has no Conf resources'.format(br))
continue
for res in add:
resp = send_resource(res, [callback_endpoint])
# Revisit: do something with resp
logging.info('subscribe/add_event: %s' % status)
response['status'] = status
return flask.make_response(flask.jsonify(response), code)
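# Added illustration (not part of this blueprint): what a client POST to the add_event
# route above might look like, assuming the blueprint registers under the name
# "subscribe". Host, port, alias, and bridge names are hypothetical; the route expects
# a JSON body with "callback" and a non-empty "bridges" list.
def _example_add_event_request():
    return HTTP_REQUESTS.post(
        'http://localhost:1026/subscribe/add_event',
        json={'callback': 'http://my-host:4444/fabric/add',
              'alias': 'my-manager',
              'bridges': ['bridge0']})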
``` |
{
"source": "jmhummel/Continuation-passing-style",
"score": 3
} |
#### File: jmhummel/Continuation-passing-style/index.py
```python
def fact_rec(n):
if n == 0:
return 1
else:
return n * fact_rec(n-1)
def fact_cps(n, cont):
if n == 0:
return cont(1)
else:
return fact_cps(n-1, lambda value: cont(n * value))
def end_cont(n):
return n
def trampoline(f, *args):
v = f(*args)
while callable(v):
v = v()
return v
def fact_cps_thunked(n, cont):
if n == 0:
return cont(1)
else:
return lambda: fact_cps_thunked(
n - 1,
lambda value: lambda: cont(n * value))
def fib_rec(n):
if n <= 2:
return 1
else:
return fib_rec(n-1) + fib_rec(n-2)
def fib_cps(n, cont):
if n <= 2:
return cont(1)
else:
        return fib_cps(
n-1,
lambda value1: fib_cps(
n-2,
lambda value2: cont(value1 + value2)
)
)
def fib_cps_thunked(n, cont):
if n <= 2:
return cont(1)
else:
return lambda: fib_cps_thunked(
n-1,
lambda value1: fib_cps_thunked(
n-2,
lambda value2: cont(value1 + value2)
)
)
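# Added variant (not in the original file): fib_cps_thunked returns thunks for the
# recursive calls, but its continuations still invoke each other directly. Wrapping
# the continuation invocations in thunks as well means every step returns to the
# trampoline, so the Python stack stays flat regardless of n (naive CPS Fibonacci is
# still exponential in time, so only small n are practical).
def fib_cps_fully_thunked(n, cont):
    if n <= 2:
        return lambda: cont(1)
    else:
        return lambda: fib_cps_fully_thunked(
            n - 1,
            lambda value1: lambda: fib_cps_fully_thunked(
                n - 2,
                lambda value2: lambda: cont(value1 + value2)))
# e.g. trampoline(fib_cps_fully_thunked(30, end_cont)) == 832040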
def main():
try:
print(fact_rec(1000))
except RecursionError as e:
print(f'RecursionError: {e}')
# RecursionError: maximum recursion depth exceeded in comparison
try:
print(fact_cps(1000, end_cont))
except RecursionError as e:
print(f'RecursionError: {e}')
# RecursionError: maximum recursion depth exceeded in comparison
try:
print(trampoline(fact_cps_thunked(1000, end_cont)))
except RecursionError as e:
print(f'RecursionError: {e}')
# 40238726007709377354370243392300398571937486421071463254379...
try:
print(fib_cps(43, end_cont))
except RecursionError as e:
print(f'RecursionError: {e}')
# RecursionError: maximum recursion depth exceeded in comparison
try:
print(trampoline(fib_cps_thunked(43, end_cont)))
except RecursionError as e:
print(f'RecursionError: {e}')
# RecursionError: maximum recursion depth exceeded in comparison
if __name__ == '__main__':
main()
``` |
{
"source": "jmhummel/Gin-Rummy-ML",
"score": 3
} |
#### File: Gin-Rummy-ML/gin_rummy/knock_evaluation.py
```python
from operator import attrgetter
from typing import List
from gin_rummy.cards import Card
import logging
logger = logging.getLogger('knock_evaluation')
def point_value(card: Card) -> int:
v = card.rank.value + 1
if v > 10:
return 10
return v
def count_deadwood(cards: List[Card]) -> int:
return sum([point_value(card) for card in cards])
def is_run_meld(cards: List[Card]):
if len(cards) < 3:
return False
suit = cards[0].suit
rank = cards[0].rank
for i, card in enumerate(cards[1:]):
if card.suit != suit or card.rank.value != rank.value + i + 1:
return False
return True
def is_set_meld(cards: List[Card]):
if len(cards) < 3:
return False
rank = cards[0].rank
for card in cards:
if card.rank != rank:
return False
return True
def sort_by_value(cards: List[Card]):
""" Returns cards sorted first by number value, then by suit """
return sorted(cards, key=attrgetter('rank.value', 'suit.value'))
def sort_by_suit(cards: List[Card]):
""" Returns cards sorted first by suit, then by number value """
return sorted(cards, key=attrgetter('suit.value', 'rank.value'))
class MeldNode:
def __init__(self, meld, parent=None):
self.parent = parent
self.meld = meld
self.deadwood = count_deadwood(meld)
if parent:
self.deadwood += parent.deadwood
def clean_meld_group(melds, meld):
""" Returns a new array of melds, containing all melds from the initial group,
except for ones that contain cards from the given meld. """
excluded_cards = set(meld)
return [m for m in melds if set(m).isdisjoint(excluded_cards)]
def build_meld_tree(melds, root=None):
"""
Returns the leaf node for which parent pointers can be followed to obtain the
best possible meld combinations.
This could be a O(n!) algorithm, where n is the number of melds. But in
normal use, it shouldn't ever approach something too infeasible, because any
large set of melds should include an enormous amount of overlapping melds,
which will be eliminated from recursive calls. The max recursion depth will
be equal to the largest number of non-overlapping melds.
"""
best = root
for meld in melds:
node = MeldNode(meld, root)
tree = build_meld_tree(clean_meld_group(melds, meld), node)
if not best or tree.deadwood > best.deadwood:
best = tree
return best
def get_meld_set(leaf_node):
""" Follows a path up to the root, and gets an array of melds """
arr = []
node = leaf_node
while node:
arr.append(node.meld)
node = node.parent
return arr
def get_best_combination(melds):
if not melds:
return 0, []
best_leaf = build_meld_tree(melds)
best_score = best_leaf.deadwood
best_melds = get_meld_set(best_leaf)
return best_score, best_melds
def get_all_melds(cards: List[Card]) -> List[List[Card]]:
all_melds = []
# First, check for 4 card sets of the same-numbered card
cards = sort_by_value(cards)
for i in range(len(cards) - 3):
pos_meld = cards[i:i+4]
if is_set_meld(pos_meld):
all_melds.append(pos_meld)
# When a 4-card meld is found, also add all the possible 3-card melds which
# won't be picked up by the subsequent 3-card scan.
all_melds.append([pos_meld[j] for j in [0, 1, 3]])
all_melds.append([pos_meld[j] for j in [0, 2, 3]])
# Next, check for 3 card sets of the same-numbered card
for i in range(len(cards) - 2):
pos_meld = cards[i:i+3]
if is_set_meld(pos_meld):
all_melds.append(pos_meld)
# Next, check for 3 card runs in the same suit
cards = sort_by_suit(cards)
for i in range(len(cards) - 2):
pos_meld = cards[i:i+3]
if is_run_meld(pos_meld):
all_melds.append(pos_meld)
# Next, check for 4 card runs
cards = sort_by_suit(cards)
for i in range(len(cards) - 3):
pos_meld = cards[i:i+4]
if is_run_meld(pos_meld):
all_melds.append(pos_meld)
# Next, check for 5 card runs
cards = sort_by_suit(cards)
for i in range(len(cards) - 4):
pos_meld = cards[i:i+5]
if is_run_meld(pos_meld):
all_melds.append(pos_meld)
# 6 or more card run are equivalent to multiple smaller runs.
return all_melds
def calc_optimal_deadwood(cards: List[Card]):
logger.info('calc_optimal_deadwood: ' + str(cards))
all_melds = get_all_melds(cards)
# Find the optimal set of melds.
all_melds.sort(key=count_deadwood)
logger.info('All melds:')
for meld in all_melds:
logger.info(meld)
best_score, best_melds = get_best_combination(all_melds)
deadwood = count_deadwood(cards) - best_score
logger.info(f"Optimal melds: {' '.join([str(m) for m in best_melds])}")
deadwood_cards = cards[:]
for meld in best_melds:
for card in meld:
deadwood_cards.remove(card)
logger.info(f"Deadwood: {', '.join([str(c) for c in sort_by_value(deadwood_cards)])} ({deadwood})")
return deadwood, best_melds
def can_knock(cards: List[Card]):
if len(cards) != 11:
raise Exception("Should only be called with exactly 11 cards")
deadwood, _ = calc_optimal_deadwood(cards)
if deadwood <= 20:
for i in range(len(cards)):
hand = cards[:i] + cards[i+1:]
logger.info("i: " + str(hand))
deadwood, _ = calc_optimal_deadwood(hand)
if deadwood <= 10:
return True
return False
def get_layable_melds(existing_melds: List[List[Card]], cards: List[Card]) -> List[List[Card]]:
layable_melds = []
# First, check 3 card sets of the same-numbered card
existing_sets = [meld for meld in existing_melds if is_set_meld(meld) and len(meld) == 3]
for set_meld in existing_sets:
rank = set_meld[0].rank
for card in cards:
if card.rank.value == rank.value:
layable_melds.append([card])
# Next, check for runs in the same suit
cards = sort_by_value(cards)
existing_runs = [meld for meld in existing_melds if is_run_meld(meld)]
for run_meld in existing_runs:
suit = run_meld[0].suit
start_rank = run_meld[0].rank
end_rank = run_meld[-1].rank
for i in range(len(cards)):
if cards[i].suit.value == suit.value:
if cards[i].rank.value == start_rank.value - 2 and i+1<len(cards) and \
cards[i+1].rank.value == start_rank.value - 1:
layable_melds.append([cards[i], cards[i+1]])
if cards[i].rank.value == start_rank.value - 1:
layable_melds.append([cards[i]])
if cards[i].rank.value == end_rank.value + 1:
layable_melds.append([cards[i]])
                if cards[i].rank.value == end_rank.value + 1 and i+1<len(cards) \
                        and cards[i+1].rank.value == end_rank.value + 2:
layable_melds.append([cards[i], cards[i+1]])
# 3 or more card lays on opp's runs are equivalent to own 3 card run
return layable_melds
def evaluate_knock(player_hand, opponent_hand):
player_deadwood, player_melds = calc_optimal_deadwood(player_hand)
# Calculate best melds for opponent, allowing lays extending player's melds
cards_to_consider = opponent_hand[:]
for meld in player_melds:
cards_to_consider.extend(meld)
all_melds = get_layable_melds(player_melds, opponent_hand) + get_all_melds(opponent_hand)
# Find the optimal set of melds.
all_melds.sort(key=count_deadwood)
logger.info('All melds:')
for meld in all_melds:
logger.info(meld)
opponent_score, opponent_melds = get_best_combination(all_melds)
opponent_deadwood = count_deadwood(opponent_hand) - opponent_score
logger.info(f"Opponent melds: {' '.join([str(m) for m in opponent_melds])}")
opponent_deadwood_cards = opponent_hand[:]
for meld in opponent_melds:
for card in meld:
opponent_deadwood_cards.remove(card)
opponent_deadwood_cards_str = ', '.join([str(c) for c in sort_by_value(opponent_deadwood_cards)])
logger.info(f"Opponent Deadwood: {opponent_deadwood_cards_str} ({opponent_deadwood})")
```
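A hedged usage sketch of the deadwood evaluator. The `Card`, `Rank`, and `Suit` classes live in `gin_rummy.cards` and are not shown here; the constructor below and the enum member names are assumptions chosen only to illustrate the call:

```python
# Illustrative only: Card/Rank/Suit construction is an assumption about gin_rummy.cards.
from gin_rummy.cards import Card, Rank, Suit
hand = [
    Card(Rank.ACE, Suit.SPADES), Card(Rank.TWO, Suit.SPADES), Card(Rank.THREE, Suit.SPADES),
    Card(Rank.SEVEN, Suit.HEARTS), Card(Rank.SEVEN, Suit.CLUBS), Card(Rank.SEVEN, Suit.DIAMONDS),
    Card(Rank.KING, Suit.HEARTS), Card(Rank.QUEEN, Suit.CLUBS),
    Card(Rank.NINE, Suit.CLUBS), Card(Rank.FOUR, Suit.DIAMONDS),
]
deadwood, melds = calc_optimal_deadwood(hand)
# Expected (if aces are low, as point_value implies): the A-2-3 spade run and the
# 7-7-7 set are melded, leaving K + Q + 9 + 4 = 33 points of deadwood.
```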
#### File: Gin-Rummy-ML/mcts/mcts.py
```python
from abc import abstractmethod
class Game:
@abstractmethod
def get_cur_player(self):
"""
Returns:
int: current player idx
"""
pass
@abstractmethod
def get_action_size(self):
"""
Returns:
int: number of all possible actions
"""
pass
@abstractmethod
def get_valid_actions(self, player):
"""
Input:
player: player
Returns:
validActions: a binary vector of length self.get_action_size(), 1 for
moves that are valid from the current board and player,
0 for invalid moves
"""
pass
@abstractmethod
def take_action(self, action):
"""
Input:
action: action taken by the current player
Returns:
double: score of current player on the current turn
int: player who plays in the next turn
"""
pass
@abstractmethod
def get_observation_size(self):
"""
Returns:
(x,y,z): a tuple of observation dimensions
"""
pass
@abstractmethod
def get_observation(self, player):
"""
Input:
player: current player
Returns:
observation matrix which will serve as an input to agent.predict
"""
pass
@abstractmethod
def get_observation_str(self, observation):
"""
Input:
observation: observation
Returns:
string: a quick conversion of state to a string format.
Required by MCTS for hashing.
"""
pass
@abstractmethod
def is_ended(self):
"""
This method must return True if is_draw returns True
Returns:
boolean: False if game has not ended. True otherwise
"""
pass
@abstractmethod
def is_draw(self):
"""
Returns:
boolean: True if game ended in a draw, False otherwise
"""
pass
@abstractmethod
def get_score(self, player):
"""
Input:
player: current player
Returns:
double: reward in [-1, 1] for player if game has ended
"""
pass
@abstractmethod
def clone(self):
"""
Returns:
Game: a deep clone of current Game object
"""
pass
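# Added illustration (not the project's MCTS): a uniformly random rollout written
# against the Game interface above, returning the final score from the perspective
# of the player who was to move at the start.
import random
def random_rollout(game):
    g = game.clone()
    first_player = g.get_cur_player()
    while not g.is_ended():
        valid = g.get_valid_actions(g.get_cur_player())
        action = random.choice([i for i, ok in enumerate(valid) if ok])
        g.take_action(action)
    return g.get_score(first_player)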
class Agent:
@abstractmethod
def predict(self, game, game_player):
"""
Returns:
policy, value: stochastic policy and a continuous value of a game observation
"""
``` |
{
"source": "jmhummel/Tic-Tac-Toe-ML",
"score": 4
} |
#### File: jmhummel/Tic-Tac-Toe-ML/tic_tac_toe.py
```python
from typing import List, Optional
class TicTacToe:
def __init__(self):
self.state = [0] * 9
self.turn = 1
self.current_player = 1
def get_action_size(self):
return 9
def get_valid_actions(self):
return tuple(1 if square == 0 else 0 for square in self.state)
def get_state(self):
return tuple(self.state)
def take_action(self, action):
self.state[action] = self.current_player
self.current_player = 2 if self.current_player == 1 else 1
    def check_squares(self, indexes: List[int]) -> Optional[int]:
mark = self.state[indexes[0]]
for i in indexes[1:]:
if mark != self.state[i]:
return None
return mark
def is_draw(self):
for square in self.state:
if square == 0:
return False
return True
def is_ended(self):
return self.get_winner() or self.is_draw()
def get_winner(self):
for indexes in [
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[0, 3, 6],
[1, 4, 7],
[2, 5, 8],
[0, 4, 8],
[2, 4, 6],
]:
winner = self.check_squares(indexes)
if winner:
return winner
return None
def get_score(self):
winner = self.get_winner()
if winner == 1:
return 1
elif winner == 2:
return -1
return 0
``` |
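As a short usage sketch (not part of the repository), the snippet below plays one random game with the `TicTacToe` class above; it assumes that class is defined in, or imported into, the same module.
```python
import random

game = TicTacToe()  # assumes the TicTacToe class defined above
while not game.is_ended():
    valid = game.get_valid_actions()
    moves = [i for i, ok in enumerate(valid) if ok]
    game.take_action(random.choice(moves))

print(game.get_state())   # e.g. (1, 2, 1, 2, 1, 2, 1, 0, 0)
print(game.get_score())   # 1 if player 1 won, -1 if player 2 won, 0 for a draw
```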
{
"source": "jmhummel/Traveling-Saleman-Genetic-Algorithm",
"score": 2
} |
#### File: jmhummel/Traveling-Saleman-Genetic-Algorithm/index.py
```python
import pickle
import random
import operator
import time
import sys
import maputils
USE_STOCHASTIC_SELECT = True
USE_ELITISM = True
CAPITALS = maputils.CAPITALS
cachedDistDur = pickle.load(open('cache.p', 'r'))
def getDistDur(origin, destination):
# print origin, destination
key = tuple(sorted([origin, destination]))
if key in cachedDistDur:
# print origin, destination, 'from cache'
return cachedDistDur[key]
else:
raise Exception('Not found in cache')
NUM_POP = 100
NUM_SEEDS = NUM_POP
def init():
seeds = []
for i in xrange(NUM_SEEDS):
r = range(0, len(CAPITALS))
# r = range(0, 16) # Test using only 16 capitals to speed up, and reduce api calls
random.shuffle(r)
seeds.append(r)
return seeds
def deDup(route):
idx = route.index(0)
route = route[idx:] + route[:idx]
if route[-1] < route[1]:
return tuple(route[:1] + list(reversed(route[1:])))
return tuple(route)
def calcDuration(route):
route = tuple(route)
# if route in cachedRouteDur:
# print cachedRouteDur[route], route, 'cached'
# return cachedRouteDur[route]
list1 = route
list2 = route[1:] + route[:1]
legs = zip(list1, list2)
totalDur = 0
for leg in legs:
point1 = CAPITALS[leg[0]]
point2 = CAPITALS[leg[1]]
totalDur += getDistDur(point1, point2)[1]
# cachedRouteDur[route] = totalDur
# print totalDur, route
return totalDur
def normalize(durationMap):
totalInvertedDuration = 0
for route, duration in durationMap.iteritems():
totalInvertedDuration += 1 / float(duration)
fitnessMap = {}
for route, duration in durationMap.iteritems():
fitnessMap[route] = (1 / float(duration)) / totalInvertedDuration
return fitnessMap
def calcAccumulatedFitness(fitnessMap):
sortedFitnessList = sorted(fitnessMap.items(), key=operator.itemgetter(1), reverse=True)
accumulatedFitnessList = []
accumulated = 0.0
for t in sortedFitnessList:
accumulatedFitnessList.append((t[0], t[1] + accumulated))
accumulated += t[1]
return accumulatedFitnessList
# Fitness proportionate selection
def select(accumulatedFitnessMap):
r = random.random()
j = 0
# print 'r', r
for chromosome in accumulatedFitnessMap:
if r < chromosome[1]:
return chromosome[0]
# Stochastic acceptance selection
def stochasticSelect(fitnessMap):
maxFitness = max(fitnessMap.values())
while True:
chromosome = random.choice(fitnessMap.keys())
r = random.random()
if r < fitnessMap[chromosome] / float(maxFitness):
return chromosome
# Partially matched crossover
def crossover(mate1, mate2):
# print mate1
# print mate2
r1 = random.randint(0, len(mate1))
r2 = random.randint(0, len(mate1))
point1 = min(r1, r2)
point2 = max(r1, r2)
offspring1 = list(mate2)
for i in xrange(point1, point2):
if offspring1[i] != mate1[i]:
for j in xrange(len(offspring1)):
if offspring1[j] == mate1[i]:
offspring1[j] = offspring1[i]
offspring1[i] = mate1[i]
offspring2 = list(mate1)
for i in xrange(point1, point2):
if offspring2[i] != mate2[i]:
for j in xrange(len(offspring2)):
if offspring2[j] == mate2[i]:
offspring2[j] = offspring2[i]
offspring2[i] = mate2[i]
# print point1, point2
# print offspring
return [offspring1, offspring2]
CROSSOVER_RATE = 0.7
# Ordered crossover
def orderedCrossover(mate1, mate2):
# print mate1
# print mate2
start = random.randint(0, len(mate1))
length = random.randint(0, len(mate1))
mate1 = mate1[start:] + mate1[:start]
mate2 = mate2[start:] + mate2[:start]
s = set(mate1[:length])
l = [x for x in mate2 if x not in s]
offspring1 = list(mate1[:length]) + l
s = set(mate2[:length])
l = [x for x in mate1 if x not in s]
offspring2 = list(mate2[:length]) + l
return [offspring1, offspring2]
MUTATION_RATE = 0.015
def mutate(chromosome):
for i in xrange(len(chromosome)-1):
if random.random() <= MUTATION_RATE:
j = random.randint(0, len(chromosome)-1)
tmp = chromosome[i]
chromosome[i] = chromosome[j]
chromosome[j] = tmp
# Reverse subsection (in place, so the caller's list is actually modified)
if random.random() <= 0.2:
start = random.randint(0, len(chromosome)-1)
length = random.randint(4, len(chromosome)-4)
rotated = chromosome[start:] + chromosome[:start]
chromosome[:] = list(reversed(rotated[:length])) + rotated[length:]
ITERATIONS = 100000
# MAX_DURATION = 2000000
MAX_DURATION = 0
def ga():
startTime = time.time()
bestRoute = None
bestDur = 0
routes = init()
stats = []
loop = 0
while bestRoute is None or bestDur > MAX_DURATION:
routes = [ deDup(route) for route in routes ]
durationMap = {}
for route in routes:
durationMap[tuple(route)] = calcDuration(route)
fitnessMap = normalize(durationMap)
if (USE_STOCHASTIC_SELECT):
shortestRoute = min(durationMap.items(), key=operator.itemgetter(1))[0]
else:
accumulatedFitnessList = calcAccumulatedFitness(fitnessMap)
shortestRoute = accumulatedFitnessList[0][0]
averageDuration = float(sum(durationMap.values())) / len(durationMap)
if (bestRoute is None or durationMap[shortestRoute] < bestDur):
bestRoute = shortestRoute
bestDur = durationMap[shortestRoute]
# stats.append(str(bestRoute) + ' ' + str(bestDur) + ' ' + str(averageDuration) + ' ' + str(loop) + ' ' + str(time.time() - startTime))
stats.append(str(bestDur) + '\t' + str(bestRoute) + '\t' + str(averageDuration) + '\t' + str(loop) + '\t' + str(time.time() - startTime))
print stats[-1]
# print '\n'.join(stats)
if USE_ELITISM:
routes = [bestRoute]
else:
routes = []
for i in xrange(NUM_POP / 2):
if (USE_STOCHASTIC_SELECT):
mate1 = stochasticSelect(fitnessMap)
mate2 = stochasticSelect(fitnessMap)
else:
mate1 = select(accumulatedFitnessList)
mate2 = select(accumulatedFitnessList)
offspring = orderedCrossover(mate1, mate2)
offspring1 = offspring[0]
offspring2 = offspring[1]
mutate(offspring1)
mutate(offspring2)
routes.append(offspring1)
routes.append(offspring2)
loop += 1
# print '\n'.join(stats)
# print stats[-1]
# print [CAPITALS[i] for i in bestRoute]
return time.time() - startTime
NUMBER_OF_TESTS = 20
def test():
totalTime = 0
for i in xrange(NUMBER_OF_TESTS):
totalTime += ga()
print totalTime / NUMBER_OF_TESTS
#test()
ga()
``` |
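The `orderedCrossover` routine above keeps a slice of one parent and fills the remaining positions from the other parent's ordering, so offspring are always valid permutations. A small Python 3 restatement of the same idea (with fixed cut points for readability; the original draws them at random) might look like this:
```python
def ordered_crossover(parent1, parent2, start, length):
    # Rotate both parents so the preserved segment begins at index 0,
    # mirroring mate1[start:] + mate1[:start] in the original.
    p1 = parent1[start:] + parent1[:start]
    p2 = parent2[start:] + parent2[:start]
    keep = set(p1[:length])
    # Keep a slice of parent 1, then append parent 2's genes in order,
    # skipping anything already present, so the result is a permutation.
    return list(p1[:length]) + [g for g in p2 if g not in keep]

p1 = [0, 1, 2, 3, 4, 5]
p2 = [5, 3, 1, 0, 2, 4]
print(ordered_crossover(p1, p2, start=2, length=3))  # [2, 3, 4, 1, 0, 5]
```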
{
"source": "jmhummel/xcube",
"score": 3
} |
#### File: jmhummel/xcube/xcube.py
```python
import sys
import re
import copy
__author__ = "<NAME>"
__copyright__ = "Copyright 2016"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Prototype"
__date__ = "2016-01-14"
colors = {1: 'red',
2: 'orange',
3: 'yellow',
4: 'green',
5: 'blue',
6: 'white'}
class XCube():
def __init__(self):
self.state = []
def main():
cube = XCube()
if __name__ == '__main__':
main()
``` |
{
"source": "jmhuus/OpticNerve",
"score": 2
} |
#### File: OpticNerve/backend/model.py
```python
import os
import sys
from sqlalchemy import Column, String, Integer
from flask_sqlalchemy import SQLAlchemy
import json
import utils
application_path = utils.get_base_application_path()
utils.ensure_path_available(application_path+"backend/")
# os.chmod(application_path+"backend/optic-nerve.db", 0o777)
database_path = f"sqlite:///{application_path}backend/optic-nerve.db"
db = SQLAlchemy()
def setup_db(app):
app.config["SQLALCHEMY_DATABASE_URI"] = database_path
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.app = app
db.init_app(app)
return db
class Camera(db.Model):
id = db.Column(db.Integer, primary_key=True)
camera_state = db.Column(db.Integer, nullable=True)
image_file_name = db.Column(db.String, nullable=True)
STATE_PENDING_CAPTURE = 1
STATE_COMPLETE = 2
def __repr__(self):
return f"<Camera {self.id} {self.camera_state}>"
```
#### File: backend/ptp/PtpAbstractTransport.py
```python
import struct
import time
class PtpRequest:
"""Class encapsulating a PTP Request."""
def __init__(self, opcode, sessionid, transactionid, params=None):
self.opcode = opcode
self.sessionid = sessionid
self.transactionid = transactionid
self.params = () if params is None else params
def __str__(self):
tmp = "Opcode:0x%04x\nSessionId:0x%04x\nTransactionId:0x%04x\n" % (self.opcode, self.sessionid, self.transactionid)
for p in self.params:
tmp += repr(p) + "\n"
return tmp
class PtpResponse:
"""Class encapsulating a PTP Response."""
def __init__(self, respcode, sessionid, transactionid, params=None):
self.respcode = respcode
self.sessionid = sessionid
self.transactionid = transactionid
self.params = () if params is None else params
def __str__(self):
tmp = "Respcode:0x%04x\nSessionId:0x%04x\nTransactionId:0x%04x\n" % (self.respcode, self.sessionid, self.transactionid)
for p in self.params:
tmp += repr(p) + "\n"
return tmp
class PtpEvent:
"""Class encapsulating a PTP Event."""
def __init__(self, eventcode, sessionid, transactionid, params=None):
self.eventcode = eventcode
self.sessionid = sessionid
self.transactionid = transactionid
self.params = () if params is None else params
def __str__(self):
tmp = "Eventcode:0x%04x\nSessionId:0x%04x\nTransactionId:0x%04x\n" % (self.eventcode, self.sessionid, self.transactionid)
for p in self.params:
tmp += repr(p) + "\n"
return tmp
class PtpAbstractTransport:
"""Class defining an abstract PTP transport."""
def __init__(self):
raise NotImplementedError('Cannot create an instance of PtpAbstractTransport')
def NewSession(self):
"""Get a new session id for this transport.
Returns: A new session ID."""
if not hasattr(self, 'sessionid'):
self.sessionid = 0
self.sessionid += 1
return self.sessionid
def send_ptp_request(self, request):
"""Transport specific code to send a PtpRequest structure to a PTP device.
Arguments:
request --- A PtpRequest to send."""
raise NotImplementedError('send_ptp_request not implemented')
def send_ptp_data(self, request, data):
"""Transport specific code to send in-memory data to a PTP device.
Arguments:
request -- The PtpRequest.
data -- String of data to send."""
raise NotImplementedError('send_ptp_data not implemented')
def get_ptp_data(self, request, stream = None):
"""Transport specific code to get data from a PTP device.
Arguments:
request -- The PtpRequest.
stream -- A stream to which data should be written to if desired.
Returns:
A tuple of (data size, received data as string)
Note: received data as string will be None if stream was supplied."""
raise NotImplementedError('get_ptp_data not implemented')
def get_ptp_response(self, request):
"""Transport specific code to get a PtpResponse from a PTP device.
Arguments:
request -- The PtpRequest.
Returns:
A PtpResponse object."""
raise NotImplementedError('get_ptp_response not implemented')
def check_ptp_event(self, sessionid, timeout=None):
raise NotImplementedError('check_ptp_event not implemented')
def ptp_simple_transaction(self, request, tx_data=None, receiving=False):
"""Perform a simple PTP operation.
Arguments:
request -- A PTPRequest class instance
tx_data -- Data to transmit, or None
receiving -- Are we expecting to receive data?
Returns:
A tuple of (PTPResponse, received data as string)."""
rx_data = None
response = None
self.send_ptp_request(request)
if tx_data is not None:
self.send_ptp_data(request, tx_data)
elif receiving:
time.sleep(0.5)
rx_data = self.get_ptp_data(request)
if isinstance(rx_data, PtpResponse):
response = rx_data
rx_data = None
if response is None:
time.sleep(0.5)
response = self.get_ptp_response(request)
return (response, rx_data)
``` |
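The abstract transport above fixes the request/data/response ordering of a PTP transaction. Purely as an illustrative sketch (no real camera or USB code, and the numeric codes below are arbitrary placeholders rather than real PTP opcodes), an in-memory fake transport shows how `ptp_simple_transaction` drives the hooks:
```python
class FakeTransport(PtpAbstractTransport):
    def __init__(self):                      # override: the base __init__ forbids direct instantiation
        self.log = []
    def send_ptp_request(self, request):
        self.log.append(('request', request.opcode))
    def send_ptp_data(self, request, data):
        self.log.append(('data', data))
    def get_ptp_data(self, request, stream=None):
        return (0, '')
    def get_ptp_response(self, request):
        return PtpResponse(0x2001, request.sessionid, request.transactionid)

transport = FakeTransport()
request = PtpRequest(0x9999, transport.NewSession(), 1)   # placeholder opcode
response, data = transport.ptp_simple_transaction(request)
print(transport.log)                  # [('request', 39321)]
print(response.respcode, data)        # 8193 None
```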
{
"source": "jmi2k/sexpy",
"score": 3
} |
#### File: src/sexpy/__init__.py
```python
DELIMS = {
'(': ')',
'[': ']',
'{': '}',
}
class ParseException(Exception):
def __init__(self, line, col, message, *args, **kwargs):
super().__init__(f'{line}:{col}: {message}', *args, **kwargs)
self.line = line
self.col = col
def _atom(cur):
"""Parse atom (literal values)."""
match = ''
# Parse whole token
while cur[0]:
head, ncur = _fetch(cur)
if head.isspace() or head in {*DELIMS, *DELIMS.values()}:
break
match += head
cur = ncur
# If the token is an integer, convert it to a native Python value
try:
match = int(match)
except ValueError:
pass
# If the token is a boolean, convert it to a native Python value
if match == 'True':
match = True
elif match == 'False':
match = False
return match, cur
def _fetch(cur):
"""Extract the next character from a cursor and advance its position."""
src, line, col = cur
head, *tail = src
if head == '\n':
return head, (tail, line+1, 1)
else:
return head, (tail, line, col+1)
def _parse(cur, delim):
"""Parse an S-expression string into a Python nested list structure."""
sexpr = []
val = None
while cur[0]:
head, ncur = _fetch(cur)
# Handle character accordingly
if head.isspace():
cur = ncur
continue
elif head in DELIMS:
val, ncur = _parse(ncur, DELIMS[head])
elif head == delim:
return sexpr, ncur
elif head in DELIMS.values():
_, line, col = cur
raise ParseException(line, col, f"mismatched delimiters (expected '{delim}', got '{head}')")
else:
val, ncur = _atom(cur)
sexpr.append(val)
cur = ncur
# This can only be reached when delimiters are balanced.
# If a delimiter is expected, the expression is malformed.
if delim:
_, line, col = cur
raise ParseException(line, col, f"unexpected EOF (expected '{delim}')")
return sexpr, cur
def loads(src, schema=None):
"""Parse an S-expression string into a Python nested list structure."""
cur = src, 1, 1
sexpr, _ = _parse(cur, None)
return sexpr if not schema else schema.extract(sexpr)
def dumps(sexpr):
"""Turn a nested list structure into its equivalent S-expression string."""
if type(sexpr) is list:
inside = ' '.join((dumps(elem) for elem in sexpr))
return f'({inside})'
elif type(sexpr) is str:
return sexpr
elif type(sexpr) in {int, bool}:
return str(sexpr)
``` |
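Given the parser above, `loads` returns one Python list per top-level expression, with integers and the literals `True`/`False` converted to native values, and `dumps` renders a single expression back to text. A small usage sketch, assuming the package is importable as `sexpy`:
```python
from sexpy import loads, dumps, ParseException

tree = loads('(define answer 42)')
print(tree)             # [['define', 'answer', 42]]  (a list of top-level expressions)
print(dumps(tree[0]))   # (define answer 42)

try:
    loads('(oops]')
except ParseException as exc:
    print(exc)          # e.g. "1:6: mismatched delimiters (expected ')', got ']')"
```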
{
"source": "jmibanez/py-environ",
"score": 2
} |
#### File: jmibanez/py-environ/py_environ.py
```python
import logging
import os
from six.moves.configparser import SafeConfigParser
log = logging.getLogger(__name__)
TRUE_EQUIVALENT_STRINGS = ["1", "yes", "true", "on"]
FALSE_EQUIVALENT_STRINGS = ["0", "no", "false", "off"]
def to_environ_key(k):
return k.upper() \
.replace('-', '_') \
.replace('.', '_DOT_') \
.replace(' ', '_') \
.replace(':', '_')
class EnvironmentConfigWrapper(SafeConfigParser, object):
def has_option(self, section, option):
env_prefix = to_environ_key(section)
env_option = to_environ_key(option)
env_name = "%s_%s" % (env_prefix, env_option)
if env_name in os.environ:
return True
else:
return super(EnvironmentConfigWrapper, self).has_option(section, option)
def get(self, section, option, **kwargs):
env_prefix = to_environ_key(section)
env_option = to_environ_key(option)
env_name = "%s_%s" % (env_prefix, env_option)
if env_name in os.environ:
return os.environ[env_name]
else:
return super(EnvironmentConfigWrapper, self).get(section, option, **kwargs)
``` |
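In other words, the wrapper first looks for an environment variable named `<SECTION>_<OPTION>` (normalized via `to_environ_key`) and only then falls back to the underlying config parser. A brief usage sketch (the import path is an assumption based on the module name):
```python
import os
from py_environ import EnvironmentConfigWrapper

config = EnvironmentConfigWrapper()
config.add_section('database')
config.set('database', 'host', 'localhost')

os.environ['DATABASE_HOST'] = 'db.internal'      # environment overrides the stored value
print(config.get('database', 'host'))            # db.internal
print(config.has_option('database', 'port'))     # False until DATABASE_PORT is set
```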
{
"source": "JMichaelAdams/hjuutilainen-recipes",
"score": 3
} |
#### File: hjuutilainen-recipes/SharedProcessors/ChecksumVerifier.py
```python
from __future__ import absolute_import
import hashlib
import os
from autopkglib import Processor, ProcessorError
__all__ = ["ChecksumVerifier"]
# Default options
DEFAULT_ALGORITHM = "SHA1"
class ChecksumVerifier(Processor):
"""Verifies the checksum of a given file"""
input_variables = {
"pathname": {
"required": True,
"description": "File path to verify.",
},
"checksum": {
"required": True,
"description": "The expected checksum.",
},
"algorithm": {
"required": False,
"description": "Algorithm to use. Supported values are "
"SHA1, SHA224, SHA256, SHA384, SHA512 or MD5. "
"If not defined, SHA1 is assumed.",
},
}
output_variables = {
}
description = __doc__
def calculate_checksum(self, file_path=None, hasher=None):
"""Calculates a checksum by reading input file in chunks
http://stackoverflow.com/a/3431838
:param file_path: The input file to hash
:param hasher: Hash type to use
"""
if not hasher or not file_path:
return None
with open(file_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hasher.update(chunk)
return hasher.hexdigest()
def main(self):
# We absolutely need the input path and an expected checksum
input_path = self.env.get("pathname", None)
if not os.path.exists(input_path):
raise ProcessorError("Error: File %s does not exist." % input_path)
checksum = self.env.get("checksum", None)
if not checksum or checksum == "":
raise ProcessorError("Error: Expected checksum is empty.")
# Calculate and verify the checksum
algorithm = self.env.get("algorithm", DEFAULT_ALGORITHM)
self.output("Calculating %s checksum for %s" % (algorithm, input_path))
calculated_checksum = self.calculate_checksum(input_path, hashlib.new(algorithm))
self.output("Calculated checksum: %s" % calculated_checksum)
self.output("Expected checksum: %s" % checksum)
if calculated_checksum == checksum:
self.output("Calculated checksum matches the expected checksum.")
else:
raise ProcessorError("Error: Calculated checksum does not match expected checksum")
if __name__ == "__main__":
processor = ChecksumVerifier()
processor.execute_shell()
``` |
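Outside of AutoPkg, the chunked-hashing idea used by `calculate_checksum` takes only a few lines. This sketch mirrors the processor's approach (4 KiB chunks, algorithm name passed to `hashlib.new`) for a standalone file check; the file name in the commented assertion is hypothetical:
```python
import hashlib

def file_checksum(path, algorithm="SHA1", chunk_size=4096):
    """Hash a file in chunks so large files never have to fit in memory."""
    hasher = hashlib.new(algorithm)
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            hasher.update(chunk)
    return hasher.hexdigest()

# Compare against an expected value, e.g. from a download page:
# assert file_checksum("installer.dmg", "SHA256") == expected_checksum
```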
{
"source": "JMichaelStringer/NeMo",
"score": 2
} |
#### File: data/text_normalization/tagger_dataset.py
```python
from tqdm import tqdm
from transformers import PreTrainedTokenizerBase
import nemo.collections.nlp.data.text_normalization.constants as constants
from nemo.collections.nlp.data.text_normalization.utils import basic_tokenize, read_data_file
from nemo.core.classes import Dataset
from nemo.utils.decorators.experimental import experimental
__all__ = ['TextNormalizationTaggerDataset']
@experimental
class TextNormalizationTaggerDataset(Dataset):
"""
Creates dataset to use to train a DuplexTaggerModel.
Converts from raw data to an instance that can be used by Dataloader.
For dataset to use to do end-to-end inference, see TextNormalizationTestDataset.
Args:
input_file: path to the raw data file (e.g., train.tsv). For more info about the data format, refer to the `text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
tokenizer: tokenizer of the model that will be trained on the dataset
mode: should be one of the values ['tn', 'itn', 'joint']. `tn` mode is for TN only. `itn` mode is for ITN only. `joint` is for training a system that can do both TN and ITN at the same time.
do_basic_tokenize: a flag indicates whether to do some basic tokenization before using the tokenizer of the model
tagger_data_augmentation (bool): a flag indicates whether to augment the dataset with additional data instances
lang: language of the dataset
"""
def __init__(
self,
input_file: str,
tokenizer: PreTrainedTokenizerBase,
mode: str,
do_basic_tokenize: bool,
tagger_data_augmentation: bool,
lang: str,
):
assert mode in constants.MODES
assert lang in constants.SUPPORTED_LANGS
self.mode = mode
self.lang = lang
raw_insts = read_data_file(input_file)
# Convert raw instances to TaggerDataInstance
insts = []
for (_, w_words, s_words) in tqdm(raw_insts):
for inst_dir in constants.INST_DIRECTIONS:
if inst_dir == constants.INST_BACKWARD and mode == constants.TN_MODE:
continue
if inst_dir == constants.INST_FORWARD and mode == constants.ITN_MODE:
continue
# Create a new TaggerDataInstance
inst = TaggerDataInstance(w_words, s_words, inst_dir, do_basic_tokenize, lang)
insts.append(inst)
# Data Augmentation (if enabled)
if tagger_data_augmentation:
filtered_w_words, filtered_s_words = [], []
for ix, (w, s) in enumerate(zip(w_words, s_words)):
if not s in constants.SPECIAL_WORDS:
filtered_w_words.append(w)
filtered_s_words.append(s)
if len(filtered_s_words) > 1:
inst = TaggerDataInstance(filtered_w_words, filtered_s_words, inst_dir, lang=lang)
insts.append(inst)
self.insts = insts
texts = [inst.input_words for inst in insts]
tags = [inst.labels for inst in insts]
# Tags Mapping
self.tag2id = {tag: id for id, tag in enumerate(constants.ALL_TAG_LABELS)}
# Finalize
self.encodings = tokenizer(texts, is_split_into_words=True, padding=False, truncation=True)
self.labels = self.encode_tags(tags, self.encodings)
def __getitem__(self, idx):
item = {key: val[idx] for key, val in self.encodings.items()}
item['labels'] = self.labels[idx]
return item
def __len__(self):
return len(self.labels)
def encode_tags(self, tags, encodings):
encoded_labels = []
for i, label in enumerate(tags):
word_ids = encodings.word_ids(batch_index=i)
previous_word_idx = None
label_ids = []
for word_idx in word_ids:
# Special tokens have a word id that is None. We set the label
# to -100 (LABEL_PAD_TOKEN_ID) so they are automatically
# ignored in the loss function.
if word_idx is None:
label_ids.append(constants.LABEL_PAD_TOKEN_ID)
# We set the label for the first token of each word.
elif word_idx != previous_word_idx:
label_id = self.tag2id[constants.B_PREFIX + label[word_idx]]
label_ids.append(label_id)
# We set the label for the other tokens in a word
else:
label_id = self.tag2id[constants.I_PREFIX + label[word_idx]]
label_ids.append(label_id)
previous_word_idx = word_idx
encoded_labels.append(label_ids)
return encoded_labels
class TaggerDataInstance:
"""
This class represents a data instance in a TextNormalizationTaggerDataset.
Args:
w_words: List of words in the written form
s_words: List of words in the spoken form
direction: Indicates the direction of the instance (i.e., INST_BACKWARD for ITN or INST_FORWARD for TN).
do_basic_tokenize: a flag indicates whether to do some basic tokenization before using the tokenizer of the model
lang: language of the input words (used by basic_tokenize when do_basic_tokenize is True)
"""
def __init__(self, w_words, s_words, direction, do_basic_tokenize=False, lang=None):
# Build input_words and labels
input_words, labels = [], []
# Task Prefix
if direction == constants.INST_BACKWARD:
input_words.append(constants.ITN_PREFIX)
if direction == constants.INST_FORWARD:
input_words.append(constants.TN_PREFIX)
labels.append(constants.TASK_TAG)
# Main Content
for w_word, s_word in zip(w_words, s_words):
# Basic tokenization (if enabled)
if do_basic_tokenize:
w_word = ' '.join(basic_tokenize(w_word, lang))
if not s_word in constants.SPECIAL_WORDS:
s_word = ' '.join(basic_tokenize(s_word, lang))
# Update input_words and labels
if s_word == constants.SIL_WORD and direction == constants.INST_BACKWARD:
continue
if s_word == constants.SELF_WORD:
input_words.append(w_word)
labels.append(constants.SAME_TAG)
elif s_word == constants.SIL_WORD:
input_words.append(w_word)
labels.append(constants.PUNCT_TAG)
else:
if direction == constants.INST_BACKWARD:
input_words.append(s_word)
if direction == constants.INST_FORWARD:
input_words.append(w_word)
labels.append(constants.TRANSFORM_TAG)
self.input_words = input_words
self.labels = labels
``` |
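The `encode_tags` method aligns word-level tags to subword tokens: the first subword of each word gets the `B_PREFIX` label, later subwords get the `I_PREFIX` label, and special tokens get -100 so the loss ignores them. A self-contained sketch of that rule follows; the tag names and ids are made up for illustration and do not come from NeMo's constants module:
```python
LABEL_PAD_TOKEN_ID = -100
tag2id = {'B-SAME': 0, 'I-SAME': 1, 'B-TRANSFORM': 2, 'I-TRANSFORM': 3}

def align_labels(word_ids, word_labels):
    """word_ids has one entry per subword token; None marks special tokens."""
    label_ids, previous = [], None
    for word_idx in word_ids:
        if word_idx is None:
            label_ids.append(LABEL_PAD_TOKEN_ID)
        elif word_idx != previous:
            label_ids.append(tag2id['B-' + word_labels[word_idx]])
        else:
            label_ids.append(tag2id['I-' + word_labels[word_idx]])
        previous = word_idx
    return label_ids

# Two words, the second split into three subwords, wrapped in [CLS]/[SEP]:
print(align_labels([None, 0, 1, 1, 1, None], ['SAME', 'TRANSFORM']))
# [-100, 0, 2, 3, 3, -100]
```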
{
"source": "jmichalczyk/motion-planning-walking",
"score": 2
} |
#### File: motion-planning-walking/walking/loop.py
```python
import sys
import os
path = os.path.abspath(os.path.join(os.path.abspath(
os.path.join(os.path.dirname(__file__), "..")), "python"))
sys.path.append(path)
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import python.system_model
import python.mpc
import python.plotting
def main():
#simulation constants
dt = 0.1
t_step = 0.8
future_steps = 2
#robot constants
h_CoM = 0.75
foot_length = 0.144
foot_width = 0.04
h_step = 0.07
feet = [foot_length, foot_width]
#instantiate the linear system model
#model = python.system_model.SystemModelDCM(h_CoM)
model = python.system_model.SystemModel(h_CoM)
#build the time vector
time_sim = 10.0
time = np.arange(0, time_sim, dt)
ntime = time.size
#instantiate the MPC object
mpc = python.mpc.RestrictedZoneMPC(model, ntime, dt, t_step, future_steps, feet)
#generate the reference speeds
vref_x = 0.1*np.ones((ntime, 1))
vref_y = 0.0*np.ones((ntime, 1))
vref_theta = 0.0*np.ones((ntime, 1))
vref = np.hstack((vref_x, vref_y, vref_theta))
#solutions placeholders
CoPs = mpc.CoP.copy()
states = mpc.x.copy()
current_foots = mpc.f_current.copy()
controls = mpc.controls.copy()
#main loop
i = 0
for t in time:
results = mpc.solve(i, vref)
states = np.vstack((states, results[0]))
current_foots = np.vstack((current_foots, results[1]))
CoPs = np.hstack((CoPs, results[2]))
controls = np.vstack((controls, results[3]))
i = i + 1
#subsample the CoM and CoP and current_foots plots - don't subsample constraints
st, cop, tms, tm, cstr = python.plotting.subsample(feet, model, states, controls, current_foots, time_sim, dt, 0.005)
#generate trajectories for tracking
pyx, pyy, pyz, pytheta = python.plotting.generate_trajectories(st, current_foots, h_step, 0.005)
#plots
fig, ax = plt.subplots(1)
plt.title('walking pattern - CoP in the restricted zone')
ax.set_xlabel('x [m]')
ax.set_ylabel('y [m]')
plt.axis('equal')
for foot in current_foots:
plt.plot(foot[0], foot[1], 'bo')
#plot rotated feet
rectangle = patches.Rectangle((foot[0]-foot_length/2, foot[1]-foot_width/2), foot_length, foot_width, color="red", fill=False)
transform = matplotlib.transforms.Affine2D().rotate_around(foot[0], foot[1], foot[2]) + ax.transData
rectangle.set_transform(transform)
ax.add_patch(rectangle)
#plot restriction zones
circle = plt.Circle((foot[0], foot[1]), 2*mpc.zone*np.sqrt(2)/2, color='b', fill=False)
ax.add_patch(circle)
square = patches.Rectangle((foot[0] - mpc.zone, foot[1] - mpc.zone), 2*mpc.zone, 2*mpc.zone, color='y', fill=False)
ax.add_patch(square)
#plot CoM and CoP
plt.plot(cop[0, :], cop[1, :], 'g')
plt.plot(st[:, 0], st[:, 3], 'b')
#plot time evolution of feet trajectory coords
fig2, ax2 = plt.subplots(1)
plt.title('feet and CoM acceleration')
ax2.set_ylabel('accel [m/s^2]')
ax2.set_xlabel('time [s]')
plt.axis('equal')
#4.8s is the time needed for exactly six steps - use instead of time_sim
plt.plot(np.linspace(0, time_sim, pyz.size), pyz.ravel(), 'r')
plt.plot(np.linspace(0, time_sim, pytheta.size), pytheta.ravel(), 'g')
plt.show()
if __name__ == '__main__':
main()
```
#### File: walking/python/system_model.py
```python
import numpy as np
#model of the linear system
class SystemModel(object):
#gravity
_g = 9.81
def __init__(self, h_CoM):
self.h_CoM = h_CoM
def A(self, T):
M = np.array([[1, T, T**2/2],
[0, 1, T],
[0, 0, 1]])
A_row1 = np.hstack((M, np.zeros((3, 3)), np.zeros((3, 3))))
A_row2 = np.hstack((np.zeros((3, 3)), M, np.zeros((3, 3))))
A_row3 = np.hstack((np.zeros((3, 3)), np.zeros((3, 3)), M))
A = np.vstack((A_row1, A_row2, A_row3))
return A
def B(self, T):
M_col1 = np.array([[T**3/6, T**2/2, T, 0, 0, 0, 0, 0, 0]]).T
M_col2 = np.array([[0, 0, 0, T**3/6, T**2/2, T, 0, 0, 0]]).T
M_col3 = np.array([[0, 0, 0, 0, 0, 0, T**3/6, T**2/2, T]]).T
B = np.hstack((M_col1, M_col2, M_col3))
return B
def D(self, T):
D_row1 = np.array([[1, 0, -self.h_CoM/SystemModel._g, 0, 0, 0, 0, 0, 0]])
D_row2 = np.array([[0, 0, 0, 1, 0, -self.h_CoM/SystemModel._g, 0, 0, 0]])
D = np.vstack((D_row1, D_row2))
return D
class SystemModelDCM(object):
#gravity
_g = 9.81
def __init__(self, h_CoM):
self.h_CoM = h_CoM
self.omega = np.sqrt(SystemModelDCM._g / h_CoM)
def A(self, T):
M = np.array([[1, T, (1.0 / (self.omega**2)) * (np.cosh(self.omega * T) - 1.0)],
[0, 1, (1.0 / self.omega) * np.sinh(self.omega * T) ],
[0, 0, np.cosh(self.omega * T) ]])
A_row1 = np.hstack((M, np.zeros((3, 3)), np.zeros((3, 3))))
A_row2 = np.hstack((np.zeros((3, 3)), M, np.zeros((3, 3))))
A_row3 = np.hstack((np.zeros((3, 3)), np.zeros((3, 3)), M))
A = np.vstack((A_row1, A_row2, A_row3))
return A
def B(self, T):
M_col1 = np.array([[(1.0 / (self.omega**3))*(np.sinh(self.omega*T) - self.omega*T), (1.0 / (self.omega**2))*(np.cosh(self.omega*T) - 1.0), (1.0 / self.omega)*np.sinh(self.omega*T), 0, 0, 0, 0, 0, 0]]).T
M_col2 = np.array([[0, 0, 0, (1.0 / (self.omega**3))*(np.sinh(self.omega*T) - self.omega*T), (1.0 / (self.omega**2))*(np.cosh(self.omega*T) - 1.0), (1.0 / self.omega)*np.sinh(self.omega*T), 0, 0, 0]]).T
M_col3 = np.array([[0, 0, 0, 0, 0, 0, (1.0 / (self.omega**3))*(np.sinh(self.omega*T) - self.omega*T), (1.0 / (self.omega**2))*(np.cosh(self.omega*T) - 1.0), (1.0 / self.omega)*np.sinh(self.omega*T)]]).T
B = np.hstack((M_col1, M_col2, M_col3))
return B
def D(self, T):
D_row1 = np.array([[1, 0, -self.h_CoM/SystemModelDCM._g, 0, 0, 0, 0, 0, 0]])
D_row2 = np.array([[0, 0, 0, 1, 0, -self.h_CoM/SystemModelDCM._g, 0, 0, 0]])
D = np.vstack((D_row1, D_row2))
return D
``` |
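Both classes expose the same interface: `A(T)` and `B(T)` propagate a stacked state of three (position, velocity, acceleration) triples, and `D(T)` maps the state to the CoP via the linear-inverted-pendulum relation p - (h/g)*accel. A minimal numerical sketch of one 100 ms step (the state ordering is inferred from the matrix layout above, not documented in the source):
```python
import numpy as np

# One 100 ms propagation step with the polynomial model above.
model = SystemModel(h_CoM=0.75)
T = 0.1

x = np.zeros((9, 1))                    # three (pos, vel, acc) triples, presumably x / y / theta
u = np.array([[0.5], [0.0], [0.0]])     # jerk-like input, one component per triple

x_next = model.A(T).dot(x) + model.B(T).dot(u)
cop = model.D(T).dot(x_next)            # CoP (x, y) from pos - (h/g) * acc
print(x_next.ravel())
print(cop.ravel())
```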
{
"source": "jmichalicek/django-mail-viewer",
"score": 3
} |
#### File: django_mail_viewer/backends/locmem.py
```python
from django.core import mail
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
"""
An email backend to use during testing and local development with Django Mail Viewer.
Similar to django.core.mail.backends.locmem.EmailBackend, this adds an outbox attribute to
django.core.mail. This stores the EmailMessage object as well as message.message().
This is because many of the headers are generated at the time message.message() is called
and are not stored by the default locmem backend, including ones useful for consistently
looking up a specific message even if the list is reordered.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not hasattr(mail, 'outbox'):
mail.outbox = []
def send_messages(self, messages):
msg_count = 0
for message in messages:
m = message.message()
mail.outbox.append(m)
msg_count += 1
return msg_count
def get_message(self, lookup_id):
"""
Look up and return a specific message in the outbox
"""
for message in mail.outbox:
# if a user is manually passing in Message-ID in extra_headers and capitalizing it
# differently than the expected Message-ID, which is supported by
# EmailMessage.message(), then we can't rely on exact-case keys. The generated
# message's get() lookup is case insensitive, so use that instead.
if message.get('message-id') == lookup_id:
return message
return None
def get_outbox(self, *args, **kwargs):
"""
Get the outbox used by this backend. This backend returns a copy of mail.outbox.
May add pagination args/kwargs.
"""
return getattr(mail, 'outbox', [])[:]
def delete_message(self, message_id: str):
"""
Remove the message with the given id from the mailbox
"""
outbox = getattr(mail, 'outbox', [])
index_to_remove = None
for idx, message in enumerate(outbox):
if message.get('message-id') == message_id:
index_to_remove = idx
break
if index_to_remove is not None:
del outbox[index_to_remove]
```
#### File: django_mail_viewer/templatetags/mail_viewer_tags.py
```python
from django import template
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag
def message_attribute(message, attribute):
"""
Return the attribute from the SafeMIMEMessage
This is required to deal with case insensitivity and some attributes use
a hyphen in the name. In a template, message.message-id, for example does
not work due to the hyphen.
"""
return message.get(attribute)
@register.simple_tag
def message_lookup_id(message):
"""
Return the message id of an email message.
Useful because the value is stored in a dict with a hyphen in the key,
making it inaccessible directly.
"""
return mark_safe(message.get('message-id', '').strip('<>'))
@register.simple_tag
def display_message_attribute(message, attribute):
return mark_safe(message_attribute(message, attribute))
```
#### File: django-mail-viewer/tests/test_models.py
```python
from pathlib import Path
import shutil
from django.conf import settings
from django.core import cache, mail
from django.test import TestCase
from django_mail_viewer.backends.database.models import EmailMessage
class DatabaseBackendEmailMessageTest(TestCase):
connection_backend = 'django_mail_viewer.backends.database.backend.EmailBackend'
@classmethod
def setUpTestData(cls):
m = mail.EmailMultiAlternatives(
'Email subject', 'Email text', '<EMAIL>', ['<EMAIL>', '<EMAIL>']
)
m.attach_alternative(
'<html><body><p style="background-color: #AABBFF; color: white">Email html</p></body></html>', 'text/html',
)
current_dir = Path(__file__).resolve().parent
m.attach_file(current_dir / 'test_files' / 'icon.gif', 'image/gif')
with mail.get_connection(cls.connection_backend) as connection:
connection.send_messages([m])
cls.multipart_message = EmailMessage.objects.filter(parent=None).first()
@classmethod
def tearDownClass(cls) -> None:
try:
shutil.rmtree(settings.MEDIA_ROOT)
finally:
super().tearDownClass()
def test_get(self):
test_matrix = [
{'header_name': 'Content-Type', 'value': 'multipart/mixed'},
{'header_name': 'Subject', 'value': 'Email subject'},
]
for t in test_matrix:
with self.subTest(header=t['header_name']):
self.assertEqual(self.multipart_message.get(t['header_name']), t['value'])
# test that looking up by header is not case sensitive
self.assertEqual(
self.multipart_message.get(t['header_name']), self.multipart_message.get(t['header_name'].lower())
)
def test_is_multipart(self):
self.assertTrue(self.multipart_message.is_multipart())
with mail.get_connection(self.connection_backend) as connection:
mail.EmailMultiAlternatives(
f'Not multipart',
f'Not multipart',
'<EMAIL>',
['<EMAIL>', '<EMAIL>'],
connection=connection,
).send()
m = EmailMessage.objects.filter(parent=None).latest('id')
self.assertFalse(m.is_multipart())
def test_walk(self):
self.assertEqual(
list(EmailMessage.objects.filter(parent=self.multipart_message).order_by('-created_at', 'id')),
list(self.multipart_message.walk()),
)
def test_get_content_type(self):
# The main message followed by each of its parts
expected_content_types = ['multipart/mixed', 'multipart/alternative', 'text/plain', 'text/html', 'image/gif']
self.assertEqual(
expected_content_types,
[m.get_content_type() for m in EmailMessage.objects.all().order_by('created_at', 'id')],
)
def test_get_payload(self):
m = self.multipart_message.parts.exclude(file_attachment='').get()
# May need to seek back to 0 after this
self.assertEqual(m.file_attachment.read(), m.get_payload())
def test_get_filename(self):
m = self.multipart_message.parts.exclude(file_attachment='').get()
self.assertEqual('icon.gif', m.get_filename())
``` |
{
"source": "jmichalicek/djukebox",
"score": 3
} |
#### File: djukebox/djukebox/forms.py
```python
from django import forms
from django.template.defaultfilters import filesizeformat
from djukebox.app_settings import UPLOAD_FILE_MAX_SIZE, UPLOAD_FILE_TYPES
from djukebox.models import Track
class TrackUploadForm(forms.Form):
"""Form for uploading an audio file to create a Track()"""
file = forms.FileField(label='Select a song to upload')
def clean_file(self):
"""
Overrides default forms.Form.clean_file()
Checks to make sure the file is an appropriate file type and size
"""
#from http://stackoverflow.com/a/4855340
data = self.cleaned_data['file']
if data:
# This comes from the http headers. They could be lying.
# Be sure to validate the actual file after saving as well.
file_type = data.content_type
if len(data.name.split('.')) == 1:
raise forms.ValidationError('File type is not supported')
if file_type in UPLOAD_FILE_TYPES:
if data._size > UPLOAD_FILE_MAX_SIZE:
raise forms.ValidationError('Please keep filesize under %s. Current filesize %s' % (filesizeformat(UPLOAD_FILE_MAX_SIZE), filesizeformat(data._size)))
else:
raise forms.ValidationError('File type is not supported: %s' %file_type)
return data
class TrackEditForm(forms.Form):
title = forms.CharField(max_length=100, required=False)
artist = forms.CharField(max_length=100, required=False)
class AlbumEditForm(forms.Form):
title = forms.CharField(max_length=100, required=False, label='Album Title')
artist = forms.CharField(max_length=100, required=False, label='Album Artist')
```
#### File: djukebox/djukebox/tasks.py
```python
import importlib
import logging
from celery.task import task
from models import Mp3File, OggFile
import app_settings
logger = logging.getLogger(__name__)
# TODO: provide rate limiting options
@task
def convert_file_to_ogg(file_id):
# TODO: make sure the file isn't already an ogg
cls = app_settings.MP3_TO_OGG
logger.debug('Creating ogg from mp3 with module %s' % cls)
converter = class_from_string(cls)()
mp3_file = Mp3File.objects.get(id=file_id)
logger.debug('Begin encoding ogg from AudioFile id %s' % mp3_file.id)
ogg_file = converter.convert(mp3_file)
logger.debug('Finished encoding ogg. Created AudioFile id %s' % ogg_file.id)
return ogg_file
@task
def convert_file_to_mp3(file_id):
cls = app_settings.OGG_TO_MP3
logger.debug('Creating mp3 from ogg with module %s' % cls)
converter = class_from_string(cls)()
ogg_file = OggFile.objects.get(id=file_id)
logger.debug('Begin encoding mp3 from AudioFile id %s' % ogg_file.id)
mp3_file = converter.convert(ogg_file)
logger.debug('Finished encoding mp3. Created AudioFile id %s' % mp3_file.id)
return mp3_file
def class_from_string(class_string):
"""Takes a string package. Class and returns the class object"""
class_package = class_string.split('.')
module = '.'.join(class_package[:-1])
klass = getattr(importlib.import_module(module), class_package[-1])
return klass
```
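`class_from_string` simply splits the dotted path, imports the module portion, and pulls the class attribute off it. A quick sketch using a standard-library class (the real converter classes are configured via app_settings):
```python
cls = class_from_string('collections.OrderedDict')
ordered = cls(a=1, b=2)
print(cls)       # e.g. <class 'collections.OrderedDict'>
print(ordered)   # an OrderedDict with keys 'a' and 'b'
```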
#### File: djukebox/djukebox/tests.py
```python
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test import client
from mutagen.oggvorbis import OggVorbis
from mutagen.easyid3 import EasyID3
from mutagen.id3 import ID3, TPE2
from models import *
import json
import shutil
import os
import types
# TODO: For many test cases there needs to be either some audio files included or something which will generate a dummy audio files.
# Due to fk to user, create these objects when needed in code
def create_artist(user, name='Test'):
artist, created = Artist.objects.get_or_create(user=user, name=name)
return artist
def create_album(user, artist, title='Test Album'):
album, created = Album.objects.get_or_create(user=user, artist=artist, title=title)
return album
# Using pre-created silent files for now.
# These could be used to create silent files on the fly
#def ffmpeg_create_empty_mp3():
# """Create a 1 second silent mp3 using ffmpeg"""
# ffmpeg -ar 44100 -acodec pcm_s16le -f s16le -ac 2 -i /dev/zero -t 00:00:01 test.mp3
#def ffmpeg_create_empty_ogg():
# """Create a 1 second silent oga using ffmpeg"""
# ffmpeg -ar 44100 -acodec pcm_s16le -f s16le -ac 2 -i /dev/zero -t 00:00:01 -acodec libvorbis test.ogg
#def sox_create_empty_mp3():
# """Create a 1 second silent mp3 using sox"""
# sox -n silent.mp3 trim 0 1
#def sox_create_empty_ogg():
# """Create a 1 second silent ogg using sox"""
# sox -n -t ogg silent.ogg trim 0 1
class MainViewTests(TestCase):
"""Test cases for views.home"""
def setUp(self):
self.user = User.objects.create_user('test', '<EMAIL>', 'test')
def test_logged_in(self):
"""Access the main view while logged in"""
self.client.login(username='test', password='<PASSWORD>')
response = self.client.get(reverse('djukebox-home'))
self.assertEqual(response.status_code, 200)
def test_logged_out(self):
"""Access the main view when not logged in"""
response = self.client.get(reverse('djukebox-home'))
self.assertRedirects(response, '%s?next=%s' %(settings.LOGIN_URL, reverse('djukebox-home')))
class AudioFileTests(TestCase):
"""Test the AudioFile class"""
fixtures = ['test_audiofilemodeltests']
def setUp(self):
self.audiofile = AudioFile.objects.get(id=1)
dest_file = os.path.join(settings.MEDIA_ROOT,
self.audiofile.file.name)
#if not os.path.exists(os.path.join(dest_file, 'silent.ogg')):
#current_dir = os.path.dirname(os.path.abspath(__file__))
#source_ogg = os.path.join(current_dir, 'test_audio/silent.ogg')
#shutil.copyfile(source_ogg, dest_file)
class AudioFileUnicodeTests(AudioFileTests):
"""Test the __unicode__() method of the AudioFile class"""
def test__unicode(self):
self.assertEqual(self.audiofile.file.name, self.audiofile.__unicode__())
class OggFileTests(TestCase):
"""Test the OggFile class"""
fixtures = ['test_oggfilemodeltests']
# I can't get manage to straight monkey patch or use mock.patch
# to properly mock out mutagen.oggvorbis.OggVorbis, so just use
# small dummy audio files and the full classes for now. This is
# what the actual mutagen test cases do.
# Most of the test cases actually still patch the .get() method and the
# value it returns, but the file needs to exist for the OggVorbis object
# to be instantiated.
def setUp(self):
self.oggfile = OggFile.objects.get(id=1)
self.dest_file = os.path.join(settings.MEDIA_ROOT,
self.oggfile.file.name)
self.dest_dir = os.path.dirname(self.dest_file)
if not os.path.exists(self.dest_dir):
os.makedirs(self.dest_dir)
if os.path.exists(self.dest_dir):
current_dir = os.path.dirname(os.path.abspath(__file__))
source_ogg = os.path.join(current_dir, 'test_audio/silent.ogg')
shutil.copyfile(source_ogg, self.dest_file)
def tearDown(self):
# CAREFUL! This could actually delete a real file.
if os.path.exists(self.dest_file):
os.remove(self.dest_file)
# Don't delete the dir if there are other files in there
if os.path.exists(self.dest_dir) and not os.listdir(self.dest_dir):
# Will blow up if this is a sym link...
# make that be the case and find a better way to do it
shutil.rmtree(self.dest_dir)
class OggFileUnicodeTests(OggFileTests):
"""Test the __unicode__() method of the OggFile class"""
def test__unicode(self):
self.assertEqual(self.oggfile.file.name, self.oggfile.__unicode__())
class OggFileGetTitleTests(OggFileTests):
"""Test the get_title() method of the OggFile class"""
def test_get_title(self):
def fake_get(a, b, c):
return ['Fake Title']
orig = OggVorbis.get
OggVorbis.get = fake_get
self.assertEqual('Fake Title', self.oggfile.get_title())
OggVorbis.get = orig
class OggFileGetArtistTests(OggFileTests):
"""Test the get_artist() method of the OggFile class"""
def test_get_artist(self):
def fake_get(a,b,c):
return ['Fake Artist']
orig = OggVorbis.get
OggVorbis.get = fake_get
self.assertEqual('Fake Artist', self.oggfile.get_artist())
OggVorbis.get = orig
class OggFileGetAlbumTests(OggFileTests):
"""Test the get_album() method of the OggFile class"""
def test_get_album(self):
def fake_get(a,b,c):
return ['Fake Album']
orig = OggVorbis.get
OggVorbis.get = fake_get
self.assertEqual('Fake Album', self.oggfile.get_album())
OggVorbis.get = orig
class Mp3FileTests(TestCase):
"""Test the Mp3File class"""
fixtures = ['test_mp3filemodeltests']
# I can't get manage to straight monkey patch or use mock.patch
# to properly mock out mutagen.easyid3.EasyID3, so just use
# small dummy audio files and the full classes for now. This is
# what the actual mutagen test cases do.
def setUp(self):
self.mp3file = Mp3File.objects.get(id=1)
self.dest_file = os.path.join(settings.MEDIA_ROOT,
self.mp3file.file.name)
self.dest_dir = os.path.dirname(self.dest_file)
if not os.path.exists(self.dest_dir):
os.makedirs(self.dest_dir)
if not os.path.exists(self.dest_file):
current_dir = os.path.dirname(os.path.abspath(__file__))
source_mp3 = os.path.join(current_dir, 'test_audio/silent.mp3')
shutil.copyfile(source_mp3, self.dest_file)
def tearDown(self):
dest_file = os.path.join(settings.MEDIA_ROOT,
self.mp3file.file.name)
if os.path.exists(self.dest_file):
os.remove(self.dest_file)
# Don't delete the dir if there are other files in there
if os.path.exists(self.dest_dir) and not os.listdir(self.dest_dir):
# Will blow up if this is a sym link...
# make that be the case and find a better way to do it
shutil.rmtree(self.dest_dir)
class Mp3FileUnicodeTests(Mp3FileTests):
"""Test the __unicode__() method of the Mp3File class"""
def test__unicode(self):
self.assertEqual(self.mp3file.file.name, self.mp3file.__unicode__())
class Mp3FileGetTitleTests(Mp3FileTests):
"""Test the get_title() method of the OggFile class"""
def test_single_title(self):
def fake_get(a,b,c):
return ['Fake Title']
orig = EasyID3.get
EasyID3.get = fake_get
self.assertEqual('Fake Title', self.mp3file.get_title())
EasyID3.get = orig
class Mp3SourceConversionTests(TestCase):
fixtures = ['test_mp3filemodeltests']
def setUp(self):
self.mp3file = Mp3File.objects.get(id=1)
dest_file = os.path.join(settings.MEDIA_ROOT,
self.mp3file.file.name)
if not os.path.exists(dest_file):
current_dir = os.path.dirname(os.path.abspath(__file__))
source_mp3 = os.path.join(current_dir, 'test_audio/silent.mp3')
shutil.copyfile(source_mp3, dest_file)
def tearDown(self):
dest_file = os.path.join(settings.MEDIA_ROOT,
self.mp3file.file.name)
if os.path.exists(dest_file):
os.remove(dest_file)
class OggSourceConversionTests(TestCase):
fixtures = ['test_oggfilemodeltests']
def setUp(self):
self.oggfile = OggFile.objects.get(id=1)
dest_file = os.path.join(settings.MEDIA_ROOT,
self.oggfile.file.name)
if not os.path.exists(dest_file):
current_dir = os.path.dirname(os.path.abspath(__file__))
source_ogg = os.path.join(current_dir, 'test_audio/silent.ogg')
shutil.copyfile(source_ogg, dest_file)
def tearDown(self):
dest_file = os.path.join(settings.MEDIA_ROOT,
self.oggfile.file.name)
if os.path.exists(dest_file):
os.remove(dest_file)
class FileConversionUnitTests(TestCase):
def test_convert_file_to_ogg_task(self):
pass
def test_convert_file_to_mp3_task(self):
pass
def test_DjukeboxMp3FromOgg(self):
pass
def test_DjukeboxOggFromMp3(self):
pass
# Master branch has handy ResourceTestCase
class ApiTests(TestCase):
"""Test the REST API"""
fixtures = ['djukebox_api_tests']
def setUp(self):
super(ApiTests, self).setUp()
self.user = User.objects.get(id=1)
self.user.set_password('<PASSWORD>')
self.user.save()
def tearDown(self):
super(ApiTests, self).tearDown()
self.client.logout()
class AlbumResourceTests(ApiTests):
"""Test usage of the AlbumResource"""
def test_get_list_not_logged_in(self):
self.client.logout()
request_args = {'resource_name': 'album',
'api_name': 'v1'}
response = self.client.get(reverse(
'api_dispatch_list',
kwargs=request_args))
self.assertEqual(response.status_code, 401)
def test_get_list(self):
"""Test the default behavior getting the AlbumResource list"""
self.client.login(username=self.user.username,
password='<PASSWORD>')
request_args = {'resource_name': 'album',
'api_name': 'v1'}
response = self.client.get(reverse(
'api_dispatch_list',
kwargs=request_args))
self.assertEqual(response.status_code, 200)
self.assertTrue(response['Content-Type'].startswith('application/json'))
returned = json.loads(response.content)
self.assertTrue('albums' in returned)
self.assertEqual(len(returned['albums']), 2)
first = returned['albums'][0]
self.assertEqual(type(first['artist']), types.UnicodeType)
self.assertEqual(type(first['tracks'][0]), types.UnicodeType)
def test_get_list_artist_details(self):
"""Test getting the AlbumResource list with Artist details"""
self.client.login(username=self.user.username,
password='<PASSWORD>')
request_args = {'resource_name': 'album',
'api_name': 'v1'}
response = self.client.get(reverse(
'api_dispatch_list',
kwargs=request_args), data={'details': 'artist'})
self.assertEqual(response.status_code, 200)
self.assertTrue(response['Content-Type'].startswith('application/json'))
returned = json.loads(response.content)
self.assertTrue('albums' in returned)
self.assertEqual(len(returned['albums']), 2)
first = returned['albums'][0]
self.assertEqual(type(first['artist']), types.DictType)
self.assertTrue('name' in first['artist'])
def test_get_list_track_details(self):
"""Test getting the AlbumResource list with Track details"""
self.client.login(username=self.user.username,
password='<PASSWORD>')
request_args = {'resource_name': 'album',
'api_name': 'v1'}
response = self.client.get(reverse(
'api_dispatch_list',
kwargs=request_args), data={'details': 'track'})
self.assertEqual(response.status_code, 200)
self.assertTrue(response['Content-Type'].startswith('application/json'))
returned = json.loads(response.content)
self.assertTrue('albums' in returned)
self.assertEqual(len(returned['albums']), 2)
first = returned['albums'][0]
self.assertEqual(type(first['tracks'][0]), types.DictType)
self.assertTrue('title' in first['tracks'][0])
def test_get_details(self):
"""Test the default behavior getting the AlbumResource details"""
album = Album.objects.all()[0]
self.client.login(username=self.user.username,
password='<PASSWORD>')
request_args = {'resource_name': 'album',
'api_name': 'v1',
'pk': album.pk}
response = self.client.get(reverse(
'api_dispatch_detail',
kwargs=request_args))
self.assertEqual(response.status_code, 200)
self.assertTrue(response['Content-Type'].startswith('application/json'))
returned = json.loads(response.content)
self.assertTrue('tracks' in returned)
self.assertTrue('title' in returned)
self.assertTrue('artist' in returned)
self.assertEqual(type(returned['artist']), types.UnicodeType)
self.assertEqual(type(returned['tracks'][0]), types.UnicodeType)
def test_get_details_artist_details(self):
"""Test the default behavior getting the AlbumResource details"""
album = Album.objects.all()[0]
self.client.login(username=self.user.username,
password='<PASSWORD>')
request_args = {'resource_name': 'album',
'api_name': 'v1',
'pk': album.pk}
response = self.client.get(reverse(
'api_dispatch_detail',
kwargs=request_args), data={'details': 'artist'})
self.assertEqual(response.status_code, 200)
self.assertTrue(response['Content-Type'].startswith('application/json'))
returned = json.loads(response.content)
self.assertTrue('tracks' in returned)
self.assertTrue('title' in returned)
self.assertTrue('artist' in returned)
self.assertEqual(type(returned['artist']), types.DictType)
self.assertEqual(type(returned['tracks'][0]), types.UnicodeType)
def test_get_details_track_details(self):
"""Test the default behavior getting the AlbumResource details"""
album = Album.objects.all()[0]
self.client.login(username=self.user.username,
password='<PASSWORD>')
request_args = {'resource_name': 'album',
'api_name': 'v1',
'pk': album.pk}
response = self.client.get(reverse(
'api_dispatch_detail',
kwargs=request_args), data={'details': 'track'})
self.assertEqual(response.status_code, 200)
self.assertTrue(response['Content-Type'].startswith('application/json'))
returned = json.loads(response.content)
self.assertTrue('tracks' in returned)
self.assertTrue('title' in returned)
self.assertTrue('artist' in returned)
self.assertEqual(type(returned['artist']), types.UnicodeType)
self.assertEqual(type(returned['tracks'][0]), types.DictType)
class ArtistResourceTests(ApiTests):
"""Test usage of the ArtistResource"""
def test_get_list_not_logged_in(self):
self.client.logout()
request_args = {'resource_name': 'artist',
'api_name': 'v1'}
response = self.client.get(reverse(
'api_dispatch_list',
kwargs=request_args))
self.assertEqual(response.status_code, 401)
def test_get_list(self):
"""Test the default behavior getting the AlbumResource list"""
self.client.login(username=self.user.username,
password='<PASSWORD>')
request_args = {'resource_name': 'artist',
'api_name': 'v1'}
response = self.client.get(reverse(
'api_dispatch_list',
kwargs=request_args))
self.assertEqual(response.status_code, 200)
self.assertTrue(response['Content-Type'].startswith('application/json'))
returned = json.loads(response.content)
self.assertTrue('artists' in returned)
self.assertEqual(len(returned['artists']), 2)
first = returned['artists'][0]
self.assertEqual(type(first['name']), types.UnicodeType)
def test_get_artist_details(self):
"""Test the default behavior getting the ArtistResource details"""
self.client.login(username=self.user.username,
password='<PASSWORD>')
artist = Artist.objects.all()[0]
request_args = {'resource_name': 'artist',
'api_name': 'v1',
'pk': artist.pk}
response = self.client.get(reverse(
'api_dispatch_detail',
kwargs=request_args))
self.assertEqual(response.status_code, 200)
self.assertTrue(response['Content-Type'].startswith('application/json'))
returned = json.loads(response.content)
self.assertTrue('name' in returned)
self.assertEqual(returned['name'], artist.name)
self.assertEqual(int(returned['id']), artist.id)
class TrackResourceTests(ApiTests):
"""Test usage of the TrackResource"""
def test_get_list_not_logged_in(self):
self.client.logout()
request_args = {'resource_name': 'track',
'api_name': 'v1'}
response = self.client.get(reverse(
'api_dispatch_list',
kwargs=request_args))
self.assertEqual(response.status_code, 401)
def test_get_list(self):
"""Test the default behavior getting the TrackResource list"""
self.client.login(username=self.user.username,
password='<PASSWORD>')
request_args = {'resource_name': 'track',
'api_name': 'v1'}
response = self.client.get(reverse(
'api_dispatch_list',
kwargs=request_args))
self.assertEqual(response.status_code, 200)
self.assertTrue(response['Content-Type'].startswith('application/json'))
returned = json.loads(response.content)
self.assertTrue('tracks' in returned)
self.assertEqual(len(returned['tracks']), 3)
first = returned['tracks'][0]
self.assertEqual(type(first['album']), types.UnicodeType)
self.assertEqual(type(first['title'][0]), types.UnicodeType)
self.assertEqual(type(first['ogg_stream_url']), types.UnicodeType)
self.assertEqual(type(first['mp3_stream_url']), types.UnicodeType)
def test_get_list_album_details(self):
"""Test TrackResource list with album details"""
self.client.login(username=self.user.username,
password='<PASSWORD>')
request_args = {'resource_name': 'track',
'api_name': 'v1'}
response = self.client.get(reverse(
'api_dispatch_list',
kwargs=request_args), data={'details': 'album'})
self.assertEqual(response.status_code, 200)
self.assertTrue(response['Content-Type'].startswith('application/json'))
returned = json.loads(response.content)
self.assertTrue('tracks' in returned)
self.assertEqual(len(returned['tracks']), 3)
first = returned['tracks'][0]
self.assertEqual(type(first['album']), types.DictType)
self.assertEqual(type(first['title'][0]), types.UnicodeType)
self.assertEqual(type(first['ogg_stream_url']), types.UnicodeType)
self.assertEqual(type(first['mp3_stream_url']), types.UnicodeType)
check_track = Track.objects.get(id=int(first['id']))
self.assertEqual(first['album']['title'], check_track.album.title)
def test_get_details(self):
"""Test the default behavior getting the TrackResource details"""
track = Track.objects.all()[0]
self.client.login(username=self.user.username,
password='<PASSWORD>')
request_args = {'resource_name': 'track',
'api_name': 'v1',
'pk': track.pk}
response = self.client.get(reverse(
'api_dispatch_detail',
kwargs=request_args))
self.assertEqual(response.status_code, 200)
self.assertTrue(response['Content-Type'].startswith('application/json'))
returned = json.loads(response.content)
self.assertTrue('mp3_stream_url' in returned)
self.assertTrue('ogg_stream_url' in returned)
self.assertTrue('album' in returned)
self.assertTrue('title' in returned)
self.assertTrue('track_number' in returned)
self.assertTrue('id' in returned)
self.assertEqual(type(returned['album']), types.UnicodeType)
self.assertEqual(type(returned['title']), types.UnicodeType)
self.assertEqual(type(returned['mp3_stream_url']), types.UnicodeType)
self.assertEqual(type(returned['ogg_stream_url']), types.UnicodeType)
def test_get_details_album_details(self):
"""Test getting the TrackResource details with album details"""
track = Track.objects.all()[0]
self.client.login(username=self.user.username,
password='<PASSWORD>')
request_args = {'resource_name': 'track',
'api_name': 'v1',
'pk': track.pk}
response = self.client.get(reverse(
'api_dispatch_detail',
kwargs=request_args), data={'details': 'album'})
self.assertEqual(response.status_code, 200)
self.assertTrue(response['Content-Type'].startswith('application/json'))
returned = json.loads(response.content)
self.assertTrue('mp3_stream_url' in returned)
self.assertTrue('ogg_stream_url' in returned)
self.assertTrue('album' in returned)
self.assertTrue('title' in returned)
self.assertTrue('track_number' in returned)
self.assertTrue('id' in returned)
self.assertEqual(type(returned['album']), types.DictType)
self.assertEqual(type(returned['title']), types.UnicodeType)
self.assertEqual(type(returned['mp3_stream_url']), types.UnicodeType)
self.assertEqual(type(returned['ogg_stream_url']), types.UnicodeType)
def test_delete_details(self):
"""Test deleting a track"""
track = Track.objects.all()[0]
self.client.login(username=self.user.username,
password='<PASSWORD>')
request_args = {'resource_name': 'track',
'api_name': 'v1',
'pk': track.pk}
response = self.client.delete(reverse(
'api_dispatch_detail',
kwargs=request_args))
self.assertEqual(response.status_code, 204)
self.assertTrue(response['Content-Type'].startswith('text/html'))
def test_delete_details_not_logged_in(self):
"""Test deleting a track"""
track = Track.objects.all()[0]
self.client.logout()
request_args = {'resource_name': 'track',
'api_name': 'v1',
'pk': track.pk}
response = self.client.delete(reverse(
'api_dispatch_detail',
kwargs=request_args))
self.assertEqual(response.status_code, 401)
self.assertTrue(response['Content-Type'].startswith('text/html'))
class TrackAlbumResourceTests(ApiTests):
def setUp(self):
super(TrackAlbumResourceTests, self).setUp()
def test_update_track_title(self):
artist = Artist.objects.create(name='<NAME>', user=self.user)
album = Album.objects.create(title='Justin Rocks Out', artist=artist, user=self.user)
track = Track.objects.create(title='Justin Is Currently Rocking', album=album, artist=artist, user=self.user)
new_data = {
'track_artist': track.artist.name + ' 2',
'track_title': track.title + ' 2',
'album_artist': album.artist.name,
'album_title': album.title
}
reverse_kwargs = {'resource_name': 'track_album', 'api_name': 'v1', 'pk': track.pk}
# this should work after upgrade to django 1.6
response = self.client.patch(reverse('api_dispatch_detail', kwargs=reverse_kwargs),
new_data)
self.assertEqual(response.status_code, 200)
```
#### File: djukebox/djukebox/views.py
```python
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import transaction
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.views.decorators.cache import cache_control
from django.http import HttpResponseRedirect, HttpResponse
from djukebox.models import Album, Track, OggFile, Mp3File
from djukebox.forms import AlbumEditForm, TrackEditForm, TrackUploadForm
from djukebox.tasks import convert_file_to_ogg, convert_file_to_mp3
from djukebox import app_settings
import os
import mimetypes
import logging
logger = logging.getLogger(__name__)
@cache_control(no_cache=True)
@login_required
def stream_track(request, track_id, file_format):
"""Stream out the audio file for a track"""
# TODO: Rename this to stream_track?
# file_format is a callable, the class of audio file to play such as Mp3File or OggFile
track = get_object_or_404(file_format, track__id=track_id, track__user=request.user)
file_path = os.path.join(settings.MEDIA_ROOT, track.file.name)
# ogg files encoded with pysox seem to be getting a media type of (audio/ogg, none) as a tuple
# which throws off firefox when it gets the content-type header. Opera is ok with it, though.
# As a fix I am just grabbing the first one here which seems to always work
resp = HttpResponse(FileIterWrapper(open(file_path,"rb")), mimetype=mimetypes.guess_type(file_path)[0])
resp['Content-Length'] = os.path.getsize(file_path)
resp['Content-Disposition'] = 'filename=' + os.path.basename(file_path)
return resp
@login_required
def main(request):
"""The primary Djukebox view which renders the UI"""
# This will get populated with track data in javascript in the html
# That initially sounds like we might as well do the whole form there
# but this makes it easier to keep aligned with what the REST API will
# be using to validate track updates.
track_edit_form = TrackEditForm(prefix='track')
album_edit_form = AlbumEditForm(prefix='album')
return render_to_response(
'djukebox/main.html',
{'content_view': reverse('djukebox-home'),
'track_edit_form': track_edit_form,
'album_edit_form': album_edit_form},
context_instance=RequestContext(request)
)
@cache_control(no_cache=True)
@login_required
@transaction.commit_on_success
def upload_track(request, hidden_frame=False):
"""Handle the upload of an audio file and create a new Track()"""
# TODO: break this up into smaller functions
    # TODO: this needs to deal with re-encoding tracks as mp3 and ogg, and the Track model
    # probably also needs to be updated to deal with this
if request.method == 'POST':
upload_form = TrackUploadForm(request.POST, request.FILES)
if upload_form.is_valid():
default_track_title = app_settings.DEFAULT_TRACK_TITLE
default_artist = app_settings.DEFAULT_ARTIST
track = Track(user=request.user)
track.title = default_track_title
track.full_clean()
track.save()
mp3_content_types = app_settings.MP3_CONTENT_TYPES
ogg_content_types = app_settings.OGG_CONTENT_TYPES
file_data = upload_form.cleaned_data['file']
# TODO: Add more flexibility for user to specify mp3 and ogg content types?
if file_data.content_type in mp3_content_types:
logger.debug('mp3 file was uploaded')
audio_file = Mp3File(file=file_data)
if file_data.content_type in ogg_content_types:
logger.debug('ogg file was uploaded')
audio_file = OggFile(file=file_data)
audio_file.track = track
audio_file.full_clean()
audio_file.save()
# Now that the physical file has been written, read the metadata
new_title = audio_file.get_title()
track.title = (new_title if new_title != '' else default_track_title)
album = Album.album_from_metadata(audio_file)
album.full_clean()
album.save()
track.album = album
track.full_clean()
track.save()
#TODO: Set artist on Track()
# Now that this is saved, make sure the file really is a valid filetype
# and kill it if it's not. The track upload form validates the http content-type header
# but does not actually check the file.
mimetype = mimetypes.guess_type(os.path.join(settings.MEDIA_ROOT, audio_file.file.name))[0]
# Check allowed file types first. We may want to only allow certain types
# even if the system can support others.
# Windows is annoying and sends video/ogg if the file extension is .ogg
# and audio/ogg if it's .oga even though the standard says .ogg is for audio
# according to wikipedia "Since 2007, the Xiph.Org Foundation recommends that .ogg only be
# used for Ogg Vorbis audio files"
# TODO: Use messages framework for these track upload success/fail messages
# as well as get celery tasks to do that. https://github.com/codeinthehole/django-async-messages maybe?
# ...or write my own for fun.
if mimetype in app_settings.UPLOAD_FILE_TYPES:
if app_settings.CONVERT_UPLOADS:
if mimetype in ogg_content_types:
convert_file_to_mp3.delay(audio_file.id)
elif mimetype in mp3_content_types:
convert_file_to_ogg.delay(audio_file.id)
logger.debug('Successfully uploaded track {0} with id {1}'.format(track.title, track.id))
json_response_data = {'track_upload': {'status': 'success', 'title': track.title}}
else:
# Delete the Track and AudioFile and return an error
json_response_data = '{"track_upload": {"status": "error", "error": "invalid file type %s"}}' % mimetype
audio_file.delete()
track.delete()
logger.warn('mimetypes.guess_type detected different content type than http header specified')
else:
# Get the errors in a cleaner way
logger.debug('{"track_upload": {"status": "error", "errors": %s}}' % upload_form.errors)
json_response_data = {'track_upload': {'status': 'error', 'errors': upload_form.errors.values()}}
if hidden_frame == True:
import json
return HttpResponse(json.dumps(json_response_data), mimetype='application/javascript')
else:
# On the off chance the upload is not being posted to an iframe so that it can happen asynchronously
# where is a sane place to redirect to?
return HttpResponseRedirect(reverse('djukebox-homeframe'))
else:
upload_form = TrackUploadForm()
return render_to_response(
'djukebox/upload_track.html',
{'upload_form': upload_form},
context_instance=RequestContext(request)
)
# This should possibly go elsewhere
# Grabbed it from http://metalinguist.wordpress.com/2008/02/12/django-file-and-stream-serving-performance-gotcha/
class FileIterWrapper(object):
"""Read a file in chunks with iter and next rather than until next newline"""
def __init__(self, flo, chunk_size = 1024**2):
self.flo = flo
self.chunk_size = chunk_size
def next(self):
data = self.flo.read(self.chunk_size)
if data:
return data
else:
raise StopIteration
def __iter__(self):
return self
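# Hypothetical usage, mirroring stream_track() above:
# HttpResponse(FileIterWrapper(open(file_path, 'rb')), mimetype=...) streams the file in
# 1 MiB (1024**2 byte) chunks rather than loading it into memory all at once.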
``` |
{
"source": "jmichellec/VU-Athena-Covid-Project",
"score": 2
} |
#### File: VU-Athena-Covid-Project/DETM-master/data.py
```python
import os
import random
import pickle
import numpy as np
import torch
import scipy.io
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def _fetch(path, name):
if name == 'train':
token_file = os.path.join(path, 'bow_tr_tokens.mat')
count_file = os.path.join(path, 'bow_tr_counts.mat')
elif name == 'valid':
token_file = os.path.join(path, 'bow_va_tokens.mat')
count_file = os.path.join(path, 'bow_va_counts.mat')
else:
token_file = os.path.join(path, 'bow_ts_tokens.mat')
count_file = os.path.join(path, 'bow_ts_counts.mat')
tokens = scipy.io.loadmat(token_file)['tokens'].squeeze()
counts = scipy.io.loadmat(count_file)['counts'].squeeze()
if name == 'test':
token_1_file = os.path.join(path, 'bow_ts_h1_tokens.mat')
count_1_file = os.path.join(path, 'bow_ts_h1_counts.mat')
token_2_file = os.path.join(path, 'bow_ts_h2_tokens.mat')
count_2_file = os.path.join(path, 'bow_ts_h2_counts.mat')
tokens_1 = scipy.io.loadmat(token_1_file)['tokens'].squeeze()
counts_1 = scipy.io.loadmat(count_1_file)['counts'].squeeze()
tokens_2 = scipy.io.loadmat(token_2_file)['tokens'].squeeze()
counts_2 = scipy.io.loadmat(count_2_file)['counts'].squeeze()
return {'tokens': tokens, 'counts': counts, 'tokens_1': tokens_1, 'counts_1': counts_1, 'tokens_2': tokens_2, 'counts_2': counts_2}
return {'tokens': tokens, 'counts': counts}
def _fetch_temporal(path, name):
if name == 'train':
token_file = os.path.join(path, 'bow_tr_tokens.mat')
count_file = os.path.join(path, 'bow_tr_counts.mat')
time_file = os.path.join(path, 'bow_tr_timestamps.mat')
elif name == 'valid':
token_file = os.path.join(path, 'bow_va_tokens.mat')
count_file = os.path.join(path, 'bow_va_counts.mat')
time_file = os.path.join(path, 'bow_va_timestamps.mat')
else:
token_file = os.path.join(path, 'bow_ts_tokens.mat')
count_file = os.path.join(path, 'bow_ts_counts.mat')
time_file = os.path.join(path, 'bow_ts_timestamps.mat')
tokens = scipy.io.loadmat(token_file)['tokens'].squeeze()
counts = scipy.io.loadmat(count_file)['counts'].squeeze()
times = scipy.io.loadmat(time_file)['timestamps'].squeeze()
if name == 'test':
token_1_file = os.path.join(path, 'bow_ts_h1_tokens.mat')
count_1_file = os.path.join(path, 'bow_ts_h1_counts.mat')
token_2_file = os.path.join(path, 'bow_ts_h2_tokens.mat')
count_2_file = os.path.join(path, 'bow_ts_h2_counts.mat')
tokens_1 = scipy.io.loadmat(token_1_file)['tokens'].squeeze()
counts_1 = scipy.io.loadmat(count_1_file)['counts'].squeeze()
tokens_2 = scipy.io.loadmat(token_2_file)['tokens'].squeeze()
counts_2 = scipy.io.loadmat(count_2_file)['counts'].squeeze()
return {'tokens': tokens, 'counts': counts, 'times': times,
'tokens_1': tokens_1, 'counts_1': counts_1,
'tokens_2': tokens_2, 'counts_2': counts_2}
return {'tokens': tokens, 'counts': counts, 'times': times}
def get_data(path, temporal=False):
### load vocabulary
with open(os.path.join(path, 'vocab.pkl'), 'rb') as f:
vocab = pickle.load(f)
if not temporal:
train = _fetch(path, 'train')
valid = _fetch(path, 'valid')
test = _fetch(path, 'test')
else:
train = _fetch_temporal(path, 'train')
valid = _fetch_temporal(path, 'valid')
test = _fetch_temporal(path, 'test')
return vocab, train, valid, test
def get_batch(tokens, counts, ind, vocab_size, emsize=300, temporal=False, times=None):
"""fetch input data by batch."""
batch_size = len(ind)
data_batch = np.zeros((batch_size, vocab_size))
if temporal:
times_batch = np.zeros((batch_size, ))
for i, doc_id in enumerate(ind):
doc = tokens[doc_id]
count = counts[doc_id]
if temporal:
timestamp = times[doc_id]
times_batch[i] = timestamp
L = count.shape[1]
if len(doc) == 1:
doc = [doc.squeeze()]
count = [count.squeeze()]
else:
doc = doc.squeeze()
count = count.squeeze()
if doc_id != -1:
for j, word in enumerate(doc):
data_batch[i, word] = count[j]
data_batch = torch.from_numpy(data_batch).float().to(device)
if temporal:
times_batch = torch.from_numpy(times_batch).to(device)
return data_batch, times_batch
return data_batch
def get_rnn_input(tokens, counts, times, num_times, vocab_size, num_docs):
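    # Average bag-of-words vector per time slice: for each t, sum the counts of all documents whose
    # timestamp equals t, then divide by the number of such documents (processed in chunks of 1000 docs).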
indices = torch.randperm(num_docs)
indices = torch.split(indices, 1000)
rnn_input = torch.zeros(num_times, vocab_size).to(device)
cnt = torch.zeros(num_times, ).to(device)
for idx, ind in enumerate(indices):
data_batch, times_batch = get_batch(tokens, counts, ind, vocab_size, temporal=True, times=times)
for t in range(num_times):
tmp = (times_batch == t).nonzero()
docs = data_batch[tmp].squeeze().sum(0)
rnn_input[t] += docs
cnt[t] += len(tmp)
if idx % 20 == 0:
print('idx: {}/{}'.format(idx, len(indices)))
rnn_input = rnn_input / cnt.unsqueeze(1)
return rnn_input
```
#### File: VU-Athena-Covid-Project/pipeline/preprocessing.py
```python
import pandas as pd
import datetime
import argparse
import os
from pattern.nl import sentiment
# python preprocessing.py FB_NOS_NU_Telegraaf_NRC_all_endFeb.csv fb
import nltk
from nltk.corpus import stopwords
import re
import spacy
import glob
def read_file(csv_file):
"""
params:
csv_file: file to be processed
returns: dataframe
"""
df = pd.read_csv(csv_file, delimiter=';').fillna('None')
return df
def df_preprocessing(df, platform):
"""
params:
df: dataframe
platform: type of platform (facebook, twitter, etc.)
returns: dataframe
"""
if (platform=='fb'):
# Rename columns
df = df.rename(columns={'like.summary.total_count': 'like_count',
'love.summary.total_count': 'love_count',
'haha.summary.total_count': 'haha_count',
'wow.summary.total_count': 'wow_count',
'sad.summary.total_count': 'sad_count',
'angry.summary.total_count': 'angry_count',
'message': 'text'
})
# Reformat date
df['query_time'] = df['query_time'].apply(
lambda x: datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f').date().strftime(
'%Y-%m-%d') if x != 'None' else 'None')
df['created_time'] = df['created_time'].apply(
lambda x: datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S%z').date().strftime(
'%Y-%m-%d') if x != 'None' else 'None')
# Remove rows where messages are empty or character length smaller than 10
df = df[(df['text'] != 'None') | (df['text'].apply(lambda x: len(x) >= 10))]
# drop duplicates
df = df.drop_duplicates(subset='text')
return df
def stopwords_merged():
"""
Merge all stopwords into one stopwords list
returns list of stopwords
"""
# Start with nltk
stopwords_all = stopwords.words('dutch')
# # Get paths with stopwords
paths = glob.glob('stopwords/*.txt')
for file in paths:
        with open(file, 'r', encoding='utf-8') as f:
            stopwords_list = f.read().splitlines()
stopwords_all += stopwords_list
unique_stopwords = list(set(stopwords_all))
return unique_stopwords
def preprocess_text(text, nlp):
"""
params:
text: text string
nlp: language model for tokenization and pos-tagging
returns: list of tokenized words, clean text, sentiment and subjectivity
"""
# Remove punctuations; if there is a letter attached, append to word before
# there's -> theres
no_urls = re.sub(r"http\S+", "", text)
punctuations = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~’”“'
text_clean = no_urls.translate(str.maketrans('', '', punctuations)).lower()
# Tokenize sentence
doc = nlp(text_clean)
tokens_pos = []
for token in doc:
# if token.text not in stopwords_merged():
# pass
# else:
tokens_pos.append((token.text, token.pos_))
text_clean = ' '.join(word[0] for word in tokens_pos)
if text_clean == '':
text_clean = 'None'
sent = sentiment(text)[0]
subj = sentiment(text)[1]
return sent, subj, text_clean, tokens_pos
def create_preprocessed_file(df, outfile_name):
"""
params:
df: dataframe
returns: None
"""
# Each word in the lexicon has scores for:
# 1) polarity: negative vs. positive (-1.0 => +1.0)
# 2) subjectivity: objective vs. subjective (+0.0 => +1.0)
# 3) intensity: modifies next word? (x0.5 => x2.0)
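    # sentiment(text) from pattern.nl returns such a (polarity, subjectivity) tuple; preprocess_text()
    # above passes its two elements through as the 'sentiment' and 'subjectivity' columns.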
nlp = spacy.load('nl_core_news_sm')
# Add columns sentiment, subjectivity, tokens+POS
df[['sentiment', 'subjectivity', 'clean_text', 'tokens_pos']] = df.apply(lambda x: preprocess_text(x.text, nlp),
1, result_type='expand')
# Remove empties
df = df[df['clean_text'] != 'None']
df.to_csv(outfile_name, index=False, sep='\t', encoding='utf-8')
def main():
parser = argparse.ArgumentParser(description='List the content of a folder')
# Add the arguments
parser.add_argument('csv_file', help='file to be preprocessed')
parser.add_argument('platform', help='which platform')
# Execute the parse_args() method
args = parser.parse_args()
# Create folder
output_dir = 'preprocessed_data'
os.makedirs('preprocessed_data', exist_ok=True)
# file paths
csv_out = output_dir + '/' + args.platform + '_preprocessed_' + args.csv_file
print("Starting preprocessing: might take awhile.")
df = read_file(args.csv_file)
df = df_preprocessing(df, args.platform)
create_preprocessed_file(df, csv_out)
print("Finished preprocessing data and saved file to preprocessed_data folder.")
if __name__ == '__main__':
main()
``` |
{
"source": "jmichellehu/rs_tools",
"score": 3
} |
#### File: rs_tools/bin/L8_TOA_refl.py
```python
import math
import geoio
from gdalconst import *
import argparse
import re
# Have user define input data from MTL file and output filename
parser = argparse.ArgumentParser(description='GeoTiff Landsat 8 Multispectral Image to TOA Reflectance Script')
parser.add_argument('-in', '--input_file', help='GeoTiff multi band MS image file', required=True)
# parser.add_argument('-in_band', '--input_band', help='GeoTiff multi band', required=True)
# parser.add_argument('-M', '--input_Mp', help='GeoTiff multi band Reflectance Multiplication input', required=True)
# parser.add_argument('-A', '--input_Ap', help='GeoTiff multi band Reflectance Addition input', required=True)
# parser.add_argument('-sun', '--input_SunEl', help='GeoTiff multi band Sun Elevation input', required=True)
parser.add_argument('-in_MTL', '--input_MTL_textfile', help='Delivered with L8 imagery', required=True)
parser.add_argument('-out', '--output_file', help='Where TOA reflectance image is to be saved', required=True)
args = parser.parse_args()
in_filename = args.input_file
# Mp = float(args.input_Mp)
# Ap = float(args.input_Ap)
# sunelev = float(args.input_SunEl)
in_MTL_filename = args.input_MTL_textfile
out_filename = args.output_file
######## --------- Define functions --------- ########
# Check that values for list are equivalent. Sourced from https://stackoverflow.com/questions/3844801/check-if-all-elements-in-a-list-are-identical
def check_equal(some_list):
# return boolean of equality for 2nd element to end and 1st element to penultimate
return some_list[1:] == some_list[:-1]
def get_val(some_list):
# extract value after " = " in list of strings
vals = [val.split(' = ')[1] for val in some_list]
return(vals)
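# Illustrative behaviour on hypothetical MTL values:
# check_equal(['2.0000E-05', '2.0000E-05']) -> True, check_equal(['2.0000E-05', '3.0000E-05']) -> False
# get_val(['REFLECTANCE_MULT_BAND_1 = 2.0000E-05']) -> ['2.0000E-05'] (the substring after ' = ')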
### --- Extract Mp, Ap, and sunelev values from MTL file --- ###
mtl_list = []
with open(in_MTL_filename, 'r') as f:
for line in f:
# strip the trailing newline character
line=line.rstrip()
# and strip the leading whitespaces, newline, and tab characters
line=line.lstrip()
# append this to the list
mtl_list.append(line)
# Use regular expressions to find matches for the Mp, Ap, and SunEl values
Mp_pattern=re.compile(r"(REFLECTANCE_MULT).*")
Ap_pattern=re.compile(r"(REFLECTANCE_ADD).*")
Sun_pattern=re.compile(r"(SUN_).*")
# iterate through each line in the list and return matches
Mp_list = [m.group() for line in mtl_list for m in [Mp_pattern.search(line)] if m]
Ap_list = [m.group() for line in mtl_list for m in [Ap_pattern.search(line)] if m]
Sun_list = [m.group() for line in mtl_list for m in [Sun_pattern.search(line)] if m]
# extract corresponding value (i.e. the bit after " = ")
Mp_val = get_val(Mp_list)
Ap_val = get_val(Ap_list)
Sun_val = get_val(Sun_list)
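# Sun_val holds every SUN_* value found in the MTL (azimuth first, then elevation), so index 1 below
# is assumed to be SUN_ELEVATION; Mp_val/Ap_val should each contain one identical value per reflective band.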
# Check that each band has the same value for Mp and Ap, and save extracted values as floats in the Mp, Ap, and sunel variables to be used in L8_toa_refl calculations. Otherwise, flag it and tell the user to check the MTL file
if check_equal(Mp_val):
Mp=float(Mp_val[0])
else:
print("Mp values are not equal, examine MTL file")
print(Mp_list)
if check_equal(Ap_val):
Ap=float(Ap_val[0])
else:
print("Ap values are not equal, examine MTL file")
print(Ap_list)
if (float(Sun_val[1]) <= 90.0 and float(Sun_val[1]) >=0.0):
sunelev = float(Sun_val[1])
else:
print("Sun elevation value out of bounds, examine MTL file")
print(Sun_val)
print(Mp, Ap, sunelev)
######## --------- CONVERT TO TOA REFLECTANCE --------- ########
# Open the multiband landsat image
img=geoio.GeoImage(in_filename)
# Numpy arrays of tif
data=img.get_data()
# Calculate TOA reflectances - equations from https://landsat.usgs.gov/using-usgs-landsat-8-product
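# Per that guide: rho' = M_p * Q_cal + A_p (TOA reflectance without sun-angle correction), then
# rho = rho' / cos(theta_SZ), where the solar zenith angle theta_SZ = 90 - sun elevation; the
# expression below converts degrees to radians via (deg / 360) * 2 * pi before taking the cosine.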
newdata = Mp * data + Ap
solzenith = 90-sunelev
TOA_refl = newdata/math.cos(solzenith/360*2*math.pi)
img.write_img_like_this(out_filename, TOA_refl)
```
#### File: rs_tools/bin/ndvi.py
```python
import argparse
import numpy as np
import rasterio as rio
import sys
def read_file(fn):
with rio.open(fn) as f:
arr=f.read()
prf=f.profile
ndv=f.nodata
return arr, prf, ndv
def calc_ndvi(red_arr, nir1_arr, r_ndv=None, nir1_ndv=None):
# Calculate NDVI
ndvi = (nir1_arr - red_arr) / (nir1_arr + red_arr)
# Create normalized ndvi array from 0-1 for further processing with min-max scaling
ndvi_norm = (ndvi+1)/2
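    # Worked example: an NDVI of 0.6 rescales to (0.6 + 1) / 2 = 0.8, so the full [-1, 1] range maps to [0, 1].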
    if (r_ndv is None) & (nir1_ndv is None):
        ndvi_ndv=9999
    else:
        ndvi_ndv=r_ndv
# Mask with ndv areas from original arrays
ndvi[red_arr==r_ndv]=r_ndv
ndvi[nir1_arr==nir1_ndv]=nir1_ndv
ndvi_norm[red_arr==r_ndv]=r_ndv
ndvi_norm[nir1_arr==nir1_ndv]=nir1_ndv
return ndvi, ndvi_norm
def run(multi_band_file, out_fn, nir1_fn, red_fn, px_res, modifier):
try:
if (multi_band_file is not None) & (modifier is not None):
red_arr, prf, r_ndv = read_file(multi_band_file[:-4] + "_b5_" + modifier + "_refl.tif")
RE_arr, _, RE_ndv = read_file(multi_band_file[:-4] + "_b6_" + modifier + "_refl.tif")
nir1_arr, _, nir1_ndv = read_file(multi_band_file[:-4] + "_b7_" + modifier + "_refl.tif")
elif (red_fn is not None) & (nir1_fn is not None):
red_arr, prf, r_ndv = read_file(red_fn)
nir1_arr, _, nir1_ndv = read_file(nir1_fn)
else:
sys.exit("Check input files, missing proper input")
ndvi, ndvi_norm = calc_ndvi(red_arr, nir1_arr, r_ndv, nir1_ndv)
ndvi_RE, ndvi_norm_RE = calc_ndvi(RE_arr, nir1_arr, RE_ndv, nir1_ndv)
# Write NDVI arrays to file
try:
with rio.Env():
prf.update(
dtype=rio.float32,
count=1,
compress='lzw')
with rio.open(out_fn, 'w', **prf) as dst:
dst.write(np.squeeze(ndvi).astype(rio.float32), 1)
with rio.open(out_fn[:-4]+"_minmax.tif", 'w', **prf) as dst:
dst.write(np.squeeze(ndvi_norm).astype(rio.float32), 1)
with rio.open(out_fn[:-4]+"_RE.tif", 'w', **prf) as dst:
dst.write(np.squeeze(ndvi_RE).astype(rio.float32), 1)
with rio.open(out_fn[:-4]+"_RE_minmax.tif", 'w', **prf) as dst:
dst.write(np.squeeze(ndvi_norm_RE).astype(rio.float32), 1)
except:
print("Cannot write out calculated NDVI")
except:
print("Cannot calculate NDVI, check inputs")
def get_parser():
parser = argparse.ArgumentParser(description='Normalized Difference Vegetation Index Calculation Script')
parser.add_argument('-in', '--MS_input_file', help='Multiband MS image file', required=False)
parser.add_argument('-out', '--output_file', help='NDVI output filename', default="ndvi.tif", required=False)
parser.add_argument('-r', '--red_band', help='Single-band red input', required=False)
parser.add_argument('-n', '--nir_band', help='Single-band NIR channel input', required=False)
parser.add_argument('-res', '--px_res', help='Pixel resolution, default is 1.2m', default="1.2", required=False)
parser.add_argument('-m', '--mod', help='Modifiers to single band filenames')
return parser
def main():
parser = get_parser()
args = parser.parse_args()
in_fn = args.MS_input_file
out_fn = args.output_file
if out_fn is None:
out_fn='ndvi.tif'
nir1_fn=args.nir_band
red_fn=args.red_band
px_res=args.px_res
# Mosaicked handling
if (args.mod == "None") | (args.mod is None):
modifier=px_res[0]+px_res[-1]
else:
modifier=args.mod + "_" + px_res[0]+px_res[-1]
# print(in_fn, out_fn, nir1_fn, red_fn, px_res, modifier)
run(in_fn, out_fn, nir1_fn, red_fn, px_res, modifier)
if __name__ == "__main__":
main()
``` |
{
"source": "jmichellehu/snowtools",
"score": 3
} |
#### File: snowtools/snowtools/get_snotel.py
```python
import sys
import os
import argparse
from datetime import datetime
import pytz
import numpy as np
import matplotlib.pyplot as plt
import ulmo
from ulmo.util import convert_datetime
from osgeo import gdal
from pygeotools.lib import malib, timelib, geolib, iolib
#URL for query
wsdlurl = "http://worldwater.byu.edu/interactive/snotel/services/index.php/cuahsi_1_1.asmx?WSDL"
def get_all_lonlat(outdir='.'):
"""
This will fetch site code, lat, lon for all SNOTEL sites
Only needs to be run once.
"""
csv_fn = os.path.join(outdir, 'snotel_lonlat.csv')
if not os.path.exists(csv_fn):
print("Generating list of SNOTEL sites and coordinates")
sites = ulmo.cuahsi.wof.get_sites(wsdlurl)
lon = []
lat = []
code = []
z = []
for k,v in sites.iteritems():
lon.append(float(v['location']['longitude']))
lat.append(float(v['location']['latitude']))
code.append(int(v['code']))
z.append(float(v['elevation_m']))
out = np.array(zip(code,lon,lat))
np.savetxt(csv_fn, out, delimiter=',', fmt='%i,%0.5f,%0.5f')
else:
out = np.loadtxt(csv_fn, delimiter=',', dtype=None)
return out
def site_filter_extent(extent, srs=geolib.wgs_srs, pad=None):
"""
Filter available sites for a given lat/lon extent
"""
sites = get_all_lonlat()
sites_srs = geolib.wgs_srs
if not srs.IsSame(sites_srs):
print("Converting SNOTEL lat/lon to input ds projection")
#This returns (x,y,z) coordinate arrays
sites_proj = np.array(geolib.cT_helper(sites[:,1], sites[:,2], 0, sites_srs, srs)).T
#Replace the original lon and lat coordinates with projected x and y
sites[:,1:3] = sites_proj[:,0:2]
#print(extent)
#print(sites)
if pad is not None:
print("Padding original extent by: %s km" % pad)
#Convert to meters
pad *= 1000.
extent = geolib.pad_extent(extent, width=pad)
#print(extent)
valid_idx = ((sites[:,1] > extent[0]) & (sites[:,1] < extent[2]) & (sites[:,2] > extent[1]) & (sites[:,2] < extent[3]))
valid_sites = sites[valid_idx]
#Only return site codes, not lat/lon
#valid_sites = valid_sites[:,0].astype(int)
if valid_sites.size == 0:
valid_sites = None
return valid_sites
def site_filter_extent_ds(ds, pad=None):
"""
Filter available sites for a given dataset
"""
snotel_srs = geolib.wgs_srs
ds_srs = geolib.get_ds_srs(ds)
extent = geolib.ds_extent(ds)
#extent = geolib.ds_extent(ds, snotel_srs)
#geom = geolib.get_outline(ds)
return site_filter_extent(extent, ds_srs, pad)
def get_series_dt(series, strptime_fmt='%Y-%m-%dT%H:%M:%S'):
"""
Get datetime series
"""
#ts = [convert_datetime(vd['date_time_utc']).replace(tzinfo=pytz.utc) for vd in series['values']]
ts = [datetime.strptime(vd['date_time_utc'], strptime_fmt) for vd in series['values']]
return np.array(ts, dtype=np.datetime64)
def get_series_val(series):
"""
Get value series
"""
# Create a clean timeseries list of (dt,val) tuples
val = [float(vd['value']) for vd in series['values']]
val = np.ma.masked_equal(val, -9999)
val = np.ma.masked_equal(val, 0.0)
return val
def map_plot(site_list, ds):
a = iolib.ds_getma(ds)
clim = malib.calcperc(a, (2,98))
mX = site_list[:,1]
mY = site_list[:,2]
pX, pY = geolib.mapToPixel(mX, mY, ds.GetGeoTransform())
#f, ax = plt.subplots(1, figsize=(6,6), subplot_kw={'aspect':'equal', 'adjustable':'box-forced'})
f, ax = plt.subplots(1, figsize=(6,6), subplot_kw={'aspect':'equal'})
im = ax.imshow(a, vmin=clim[0], vmax=clim[1], cmap='inferno')
ax.set_facecolor('0.5')
from imview.lib import pltlib
pltlib.add_scalebar(ax, geolib.get_res(ds)[0])
ax.scatter(pX, pY, s=16, facecolors='w', edgecolors='k')
for i, lbl in enumerate(site_list[:,0]):
bbox=dict(boxstyle='round,pad=0.1', fc='k', alpha=0.7)
ax.annotate(str(int(lbl)), xy=(pX[i], pY[i]), xytext=(0, 4), textcoords='offset points', fontsize=8, color='w', bbox=bbox)
return f
def getparser():
parser = argparse.ArgumentParser(description="Identify, download, and process SNOTEL records")
parser.add_argument('-dt_start', default=None, type=int, help='Start timestamp (format: YYYYMMDD), leave as None for earliest record')
parser.add_argument('-dt_end', default=None, type=int, help='End timestamp (format: YYYYMMDD), leave as None for latest record')
parser.add_argument('-outdir', default=os.getcwd(), help='Directory to store intermediate products (default: %(default)s)')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-fn', type=str, help='Raster filename to match (e.g., YYYYMMDD_raster.tif)')
#parser.add_argument('-dt_pad', type=int, default=366, help='Combine data from this many days before and after target date (default: %(default)s)')
group.add_argument('-extent', default=None, type=float, nargs=4, metavar=('MINLON', 'MINLAT', 'MAXLON', 'MAXLAT'), help='Spatial extent for query')
group.add_argument('-stack_fn', type=str, help='DEM stack filename to match (e.g., YYYYMMDD_YYYYMMDD_stack_n.tif)')
    parser.add_argument('-extent_pad', type=float, help='Amount of padding for the extent, in km')
#Incremental precip, cumulative precip
#PRCP (mm), PREC (mm), SNWD (cm), TAVG, TMAX, TMIN, WTEQ (mm)
vlist_choices = ['PRCP', 'PREC', 'SNWD', 'TAVG', 'TMAX', 'TMIN', 'WTEQ']
parser.add_argument('-vlist', nargs='+', type=str, choices=vlist_choices, default=['SNWD', 'WTEQ'], help='SNOTEL variables to query')
return parser
#def main():
parser = getparser()
args = parser.parse_args()
#Set start/end date range for data
#dt_start = datetime(1932,1,1)
dt_start = args.dt_start
#dt_end = datetime.now()
dt_end = args.dt_end
if dt_start is not None:
    dt_start = datetime.strptime(str(args.dt_start), '%Y%m%d')  # argparse parses this as int
if dt_end is not None:
    dt_end = datetime.strptime(str(args.dt_end), '%Y%m%d')  # argparse parses this as int
#Clean this up
if args.fn is not None:
if os.path.exists(args.fn):
fn = args.fn
ds = gdal.Open(fn)
site_list = site_filter_extent_ds(ds, pad=args.extent_pad)
elif args.extent is not None:
    site_list = site_filter_extent(args.extent, pad=args.extent_pad)
elif args.stack_fn is not None:
#DEM stack, can be used to plot lines on SNOTEL time series
stack = malib.DEMStack(stack_fn=args.stack_fn)
dem_dt = stack.date_list
ds = stack.get_ds()
site_list = site_filter_extent_ds(ds, pad=args.extent_pad)
else:
sys.exit("Must provide valid raster filename or lat/lon extent")
#sitename = 'baker'
#site_list = [999, 909, 1011, 910]
#sitename = 'gm'
#site_list = [622, 682]
if site_list is None:
sys.exit("No valid sites identified")
vlist = args.vlist
#Accuracy of measurements, in cm
#https://www.wcc.nrcs.usda.gov/snotel/snotel_sensors.html
sigma_factor = 3
snwd_precision = sigma_factor*1.27/100.
wteq_precision = sigma_factor*0.254/100.
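#e.g. the 1.27 cm (0.5 in) snow depth sensor precision becomes a 3-sigma threshold of 3 * 1.27 / 100 = 0.0381 m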
print("\nSite codes: %s" % ', '.join(map(str,site_list[:,0].astype(int))))
print("Start date: %s" % dt_start)
print("End date: %s" % dt_end)
print("Variables: %s\n" % ','.join(vlist))
d = {}
for n, site in enumerate(site_list):
sitecode = int(site[0])
print('Processing site %i of %i: %i' % ((n+1), len(site_list), sitecode))
sitekey = 'SNOTEL:%i' % sitecode
#site = ulmo.cuahsi.wof.get_site_info(wsdlurl, sitekey)
#Get first variable, use to set dates
v = vlist[0]
sitev = 'SNOTEL:%s' % v
print(sitev)
series = ulmo.cuahsi.wof.get_values(wsdlurl, sitekey, sitev, start=dt_start, end=dt_end)
dt = get_series_dt(series)
d[sitecode] = {'dt':dt}
val = get_series_val(series)
d[sitecode][v] = val
for v in vlist[1:]:
sitev = 'SNOTEL:%s' % v
print(sitev)
series = ulmo.cuahsi.wof.get_values(wsdlurl, sitekey, sitev, start=dt_start, end=dt_end)
#dt = series['values']['date_time_utc']
#vals = series['values']['value']
val = get_series_val(series)
#Looks like these are not always updated simultaneously, make sure the records are same length
#Should probably just query both dt and vals simultaneously, rather than assume all variables are same length
if val.size != dt.size:
val = val[0:dt.size]
d[sitecode][v] = val
#Convert SNWD to m
d[sitecode]['SNWD'] /= 100.
d[sitecode]['WTEQ'] /= 1000.
#Mask values less than instrument precision
d[sitecode]['SNWD'] = np.ma.masked_less(d[sitecode]['SNWD'], snwd_precision)
d[sitecode]['WTEQ'] = np.ma.masked_less(d[sitecode]['WTEQ'], wteq_precision)
#Calculate density in g/cc
rho = (d[sitecode]['WTEQ']/d[sitecode]['SNWD'])
#Mask density values when snow depth is small, helps avoid bogus density values
depth_thresh = 0.2
rho[(d[sitecode]['SNWD'] < depth_thresh)] = np.ma.masked
d[sitecode]['Density'] = rho
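    #e.g. 0.45 m w.e. of SWE over 1.5 m of snow depth gives a bulk density of 0.45/1.5 = 0.3 g/cc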
vlist.append('Density')
print("Plotting")
ts_f, ts_axa = plt.subplots(len(vlist), 1, sharex=True, figsize=(10,7.5))
for sitecode in d.keys():
#For some reason, can't subtract datetime from np.datetime64
dt = d[sitecode]['dt'].astype(datetime)
for n,v in enumerate(vlist):
vmed = np.ma.median(d[sitecode][v])
#vmean = np.ma.mean(d[sitecode][[)
#lbl = '%s: %0.2f' % (sitecode, vmed)
lbl = str(sitecode)
p = ts_axa[n].plot(dt, d[sitecode][v], marker='o', ms=1, linestyle='', label=lbl)
ts_axa[n].set_ylabel(vlist[n])
ts_axa[n].axhline(vmed, c=p[0].get_color(), linestyle=':', linewidth=0.5)
ts_axa[0].set_ylabel('Snow Depth (m)')
ts_axa[1].set_ylabel('SWE (m w.e.)')
ts_axa[n].set_ylabel('Density (g/cc)')
ts_axa[n].xaxis_date()
ts_axa[n].set_ylim(0,1.0)
ts_axa[n].legend(prop={'size':8})
#Plot lines for DEM timestamps
if args.stack_fn is not None:
for dt in dem_dt:
ts_axa[0].axvline(dt, color='k', alpha=0.2)
ts_axa[2].axvline(dt, color='k', alpha=0.2)
for sitecode in d.keys():
#For some reason, can't subtract datetime from np.datetime64
dt_list = d[sitecode]['dt'].astype(datetime)
dt_idx = timelib.get_closest_dt_padded_idx(dt.date(), dt_list, pad=3)
rho_mean = np.mean(d[sitecode]['Density'][dt_idx])
print(dt, sitecode, rho_mean)
plt.tight_layout()
ts_f.autofmt_xdate()
map_f = map_plot(site_list, ds)
if False:
fig_fn = '%s_SNOTEL_ts.pdf' % fn
plt.savefig(fig_fn, bbox_inches='tight')
fig_fn = '%s_SNOTEL_ts.png' % fn
plt.savefig(fig_fn, dpi=300, bbox_inches='tight')
"""
#Limit time series plot to recent years
ts_axa[n].set_xlim(datetime(2013,8,1), datetime(2016,6,30))
fig_fn = '%s_SNOTEL_2013-2016.png' % sitename
f.set_size_inches(4,7.5)
plt.tight_layout()
plt.savefig(fig_fn, dpi=300, bbox_inches='tight')
"""
plt.show()
``` |
{
"source": "jmichelsen/django-fixtureless",
"score": 2
} |
#### File: test_app/tests/utils.py
```python
from django.test import TestCase
from fixtureless.utils import list_get
class ListGetTest(TestCase):
def test_list_get(self):
array = list()
index = 0
val = list_get(array, index)
self.assertIsNone(val)
val = list_get(array, index, 'test')
self.assertEqual(val, 'test')
array = [1, 2]
val = list_get(array, index)
self.assertEqual(val, 1)
``` |
{
"source": "jmichuda/bmi-214-final-project",
"score": 2
} |
#### File: bmi-214-final-project/src/add_civic_onto.py
```python
from owlready2 import *
from src.generate_data import add_civic_variants, add_civic_cnas
import defopt
def main(input_path: str, civic_path:str, output_path: str):
onto = get_ontology(input_path).load()
with onto:
onto = add_civic_variants(onto, civic_path)
onto = add_civic_cnas(onto, civic_path)
onto.save(file = output_path)
if __name__ == "__main__":
defopt.run(main)
```
#### File: bmi-214-final-project/src/AnnotatorCore.py
```python
import json
import sys
import csv
from enum import Enum
import requests
import os.path
import logging
import re
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from datetime import date
import ctypes as ct
logging.basicConfig(level=logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
log = logging.getLogger('AnnotatorCore')
# API timeout is set to four minutes
REQUEST_TIMEOUT = 240
csv.field_size_limit(int(ct.c_ulong(-1).value // 2)) # Deal with overflow problem on Windows, https://stackoverflow.com/questions/15063936/csv-error-field-larger-than-field-limit-131072
sizeLimit = csv.field_size_limit()
csv.field_size_limit(sizeLimit) # for reading large files
oncokbapiurl = "https://www.oncokb.org/api/v1"
oncokbapibearertoken = ""
def setoncokbbaseurl(u):
global oncokbapiurl
oncokbapiurl = u.rstrip('/') + '/api/v1'
def setoncokbapitoken(t):
global oncokbapibearertoken
oncokbapibearertoken = t.strip()
cancerhotspotsbaseurl = "http://www.cancerhotspots.org"
def setcancerhotspotsbaseurl(u):
global cancerhotspotsbaseurl
cancerhotspotsbaseurl = u
_3dhotspotsbaseurl = "http://www.3dhotspots.org"
def set3dhotspotsbaseurl(u):
global _3dhotspotsbaseurl
_3dhotspotsbaseurl = u
sampleidsfilter = None
def setsampleidsfileterfile(f):
global sampleidsfilter
content = [line.rstrip() for line in open(f)]
sampleidsfilter = set(content)
log.info(len(sampleidsfilter))
GENE_IN_ONCOKB_HEADER = 'GENE_IN_ONCOKB'
VARIANT_IN_ONCOKB_HEADER = 'VARIANT_IN_ONCOKB'
GENE_IN_ONCOKB_DEFAULT = 'False'
VARIANT_IN_ONCOKB_DEFAULT = 'False'
levels = [
'LEVEL_1',
'LEVEL_2',
'LEVEL_3A',
'LEVEL_3B',
'LEVEL_4',
'LEVEL_R1',
'LEVEL_R2',
'LEVEL_R3'
]
dxLevels = [
'LEVEL_Dx1',
'LEVEL_Dx2',
'LEVEL_Dx3'
]
pxLevels = [
'LEVEL_Px1',
'LEVEL_Px2',
'LEVEL_Px3'
]
mutationtypeconsequencemap = {
'3\'Flank': ['any'],
'5\'Flank ': ['any'],
'Targeted_Region': ['inframe_deletion', 'inframe_insertion'],
'COMPLEX_INDEL': ['inframe_deletion', 'inframe_insertion'],
'ESSENTIAL_SPLICE_SITE': ['feature_truncation'],
'Exon skipping': ['inframe_deletion'],
'Frameshift deletion': ['frameshift_variant'],
'Frameshift insertion': ['frameshift_variant'],
'FRAMESHIFT_CODING': ['frameshift_variant'],
'Frame_Shift_Del': ['frameshift_variant'],
'Frame_Shift_Ins': ['frameshift_variant'],
'Fusion': ['fusion'],
'Indel': ['frameshift_variant', 'inframe_deletion', 'inframe_insertion'],
'In_Frame_Del': ['inframe_deletion'],
'In_Frame_Ins': ['inframe_insertion'],
'Missense': ['missense_variant'],
'Missense_Mutation': ['missense_variant'],
'Nonsense_Mutation': ['stop_gained'],
'Nonstop_Mutation': ['stop_lost'],
'Splice_Site': ['splice_region_variant'],
'Splice_Site_Del': ['splice_region_variant'],
'Splice_Site_SNP': ['splice_region_variant'],
'splicing': ['splice_region_variant'],
'Translation_Start_Site': ['start_lost'],
'vIII deletion': ['any']
}
# column headers
HUGO_HEADERS = ['HUGO_SYMBOL', 'HUGO_GENE_SYMBOL', 'GENE']
CONSEQUENCE_HEADERS = ['VARIANT_CLASSIFICATION', 'MUTATION_TYPE']
ALTERATION_HEADER = 'ALTERATION'
HGVSP_SHORT_HEADER = 'HGVSP_SHORT'
HGVSP_HEADER = 'HGVSP'
HGVSG_HEADER = 'HGVSG'
HGVS_HEADERS = [ALTERATION_HEADER, HGVSP_SHORT_HEADER, HGVSP_HEADER, HGVSG_HEADER, 'AMINO_ACID_CHANGE', 'FUSION']
SAMPLE_HEADERS = ['SAMPLE_ID', 'TUMOR_SAMPLE_BARCODE']
PROTEIN_START_HEADERS = ['PROTEIN_START']
PROTEIN_END_HEADERS = ['PROTEIN_END']
PROTEIN_POSITION_HEADERS = ['PROTEIN_POSITION']
CANCER_TYPE_HEADERS = ['ONCOTREE_CODE', 'CANCER_TYPE']
FUSION_HEADERS = ['FUSION']
REFERENCE_GENOME_HEADERS = ['NCBI_BUILD', 'REFERENCE_GENOME']
# columns for genomic change annotation
GC_CHROMOSOME_HEADER = 'CHROMOSOME'
GC_START_POSITION_HEADER = 'START_POSITION'
GC_END_POSITION_HEADER = 'END_POSITION'
GC_REF_ALLELE_HEADER = 'REFERENCE_ALLELE'
GC_VAR_ALLELE_1_HEADER = 'TUMOR_SEQ_ALLELE1'
GC_VAR_ALLELE_2_HEADER = 'TUMOR_SEQ_ALLELE2'
GENOMIC_CHANGE_HEADERS = [GC_CHROMOSOME_HEADER, GC_START_POSITION_HEADER, GC_END_POSITION_HEADER, GC_REF_ALLELE_HEADER, GC_VAR_ALLELE_1_HEADER, GC_VAR_ALLELE_2_HEADER]
class QueryType(Enum):
HGVSP_SHORT = 'HGVSP_SHORT'
HGVSP = 'HGVSP'
HGVSG = 'HGVSG'
GENOMIC_CHANGE = 'GENOMIC_CHANGE'
class ReferenceGenome(Enum):
GRCH37 = 'GRCh37'
GRCH38 = 'GRCh38'
REQUIRED_QUERY_TYPE_COLUMNS = {
QueryType.HGVSP_SHORT: [HGVSP_SHORT_HEADER],
QueryType.HGVSP: [HGVSP_HEADER],
QueryType.HGVSG: [HGVSG_HEADER],
QueryType.GENOMIC_CHANGE: GENOMIC_CHANGE_HEADERS
}
POST_QUERIES_THRESHOLD = 200
POST_QUERIES_THRESHOLD_GC_HGVSG = 100
def getOncokbInfo():
ret = ['Files annotated on ' + date.today().strftime('%m/%d/%Y') + "\nOncoKB API URL: "+oncokbapiurl]
try:
info = requests.get(oncokbapiurl + "/info", timeout=REQUEST_TIMEOUT).json()
ret.append('\nOncoKB data version: ' + info['dataVersion']['version']+', released on ' + info['dataVersion']['date'])
except:
log.error("error when fetch OncoKB info")
return ''.join(ret)
def generateReadme(outfile):
outf = open(outfile, 'w+', 1000)
outf.write(getOncokbInfo())
outf.close()
def gethotspots(url, type):
hotspots = {}
response = requests.get(url, timeout=REQUEST_TIMEOUT)
if response.status_code == 200:
hotspotsjson = response.json()
for hs in hotspotsjson:
gene = hs['hugoSymbol']
start = hs['aminoAcidPosition']['start']
end = hs['aminoAcidPosition']['end']
if type is None or hs['type'] == type:
if gene not in hotspots:
hotspots[gene] = set()
for i in range(start, end + 1):
hotspots[gene].add(i)
else:
log.error("error when processing %s \n" % url +
"reason: %s" % response.reason)
return hotspots
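# e.g. a hotspot record spanning amino acids 600-601 of a gene (hypothetical) adds {600, 601} to hotspots[gene]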
def makeoncokbpostrequest(url, body):
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % oncokbapibearertoken
}
return requests.post(url, headers=headers, data=json.dumps(body, default=lambda o: o.__dict__),
timeout=REQUEST_TIMEOUT)
def makeoncokbgetrequest(url):
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % oncokbapibearertoken
}
return requests.get(url, headers=headers, timeout=REQUEST_TIMEOUT)
_3dhotspots = None
def init_3d_hotspots():
global _3dhotspots
_3dhotspots = gethotspots(_3dhotspotsbaseurl+"/api/hotspots/3d", None)
conversiondict = {'Ala': 'A',
'Asx': 'B',
'Cys': 'C',
'Asp': 'D',
'Glu': 'E',
'Phe': 'F',
'Gly': 'G',
'His': 'H',
'Ile': 'I',
'Lys': 'K',
'Leu': 'L',
'Met': 'M',
'Asn': 'N',
'Pro': 'P',
'Gln': 'Q',
'Arg': 'R',
'Ser': 'S',
'Thr': 'T',
'Val': 'V',
'Trp': 'W',
'Tyr': 'Y',
'Glx': 'Z'
}
conversionlist = conversiondict.keys()
def conversion(hgvs):
threecharactersearch = re.findall('[a-zA-Z]{3}\d+', hgvs, flags=re.IGNORECASE)
if threecharactersearch:
if any(letters.lower() in hgvs.lower() for letters in conversionlist):
return replace_all(hgvs)
return hgvs
def replace_all(hgvs):
# Author: <NAME>
pattern = re.compile('|'.join(conversionlist), re.IGNORECASE)
return pattern.sub(lambda m: conversiondict[m.group().capitalize()], hgvs)
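# Illustrative conversion of a hypothetical variant: conversion('Val600Glu') -> 'V600E';
# a change already in one-letter form (e.g. 'V600E') has no three-letter match and is returned unchanged.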
def append_annotation_to_file(outf, ncols, rows, annotations):
if len(rows) != len(annotations):
log.error('The length of the rows and annotations do not match')
for index, annotation in enumerate(annotations):
row = rows[index]
if annotation is not None:
row = row + annotation
row = padrow(row, ncols)
rowstr = '\t'.join(row)
rowstr = rowstr.encode('ascii', 'ignore').decode('ascii')
outf.write(rowstr + "\n")
def get_tumor_type_from_row(row, row_index, defaultCancerType, icancertype, cancerTypeMap, sample):
cancertype = defaultCancerType
if icancertype >= 0:
row_cancer_type = get_cell_content(row, icancertype)
if row_cancer_type is not None:
cancertype = row_cancer_type
if sample in cancerTypeMap:
cancertype = cancerTypeMap[sample]
if cancertype == "":
log.info("Cancer type for the sample should be defined for a more accurate result\nline %s: %s\n" % (row_index, row))
# continue
return cancertype
def has_desired_headers(desired_headers, file_headers):
has_required_headers = True
for header in desired_headers:
if header not in file_headers:
has_required_headers = False
break
return has_required_headers
def resolve_query_type(user_input_query_type, headers):
selected_query_type = None
if isinstance(user_input_query_type, QueryType):
selected_query_type = user_input_query_type
if selected_query_type is None and HGVSP_SHORT_HEADER in headers:
selected_query_type = QueryType.HGVSP_SHORT
if selected_query_type is None and HGVSP_HEADER in headers:
selected_query_type = QueryType.HGVSP
if selected_query_type is None and HGVSG_HEADER in headers:
selected_query_type = QueryType.HGVSG
if selected_query_type is None and has_desired_headers(REQUIRED_QUERY_TYPE_COLUMNS[QueryType.GENOMIC_CHANGE], headers):
selected_query_type = QueryType.GENOMIC_CHANGE
# default to HGVSp_Short
if selected_query_type is None:
selected_query_type = QueryType.HGVSP_SHORT
# check the file has required columns
if has_desired_headers(REQUIRED_QUERY_TYPE_COLUMNS[selected_query_type], headers) == False:
# when it is False, it will never be GENOMIC_CHANGE. For other types, we need to check whether ALTERATION column is available
if ALTERATION_HEADER not in headers:
raise Exception("The file does not have required columns "
+ ', '.join(REQUIRED_QUERY_TYPE_COLUMNS[user_input_query_type])
+ " for the query type: " + user_input_query_type.value)
return selected_query_type
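# Resolution order when no query type is given explicitly: HGVSp_Short, then HGVSp, then HGVSg,
# then the genomic-change columns, falling back to HGVSp_Short by default.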
def get_reference_genome_from_row(row_reference_genome, default_reference_genome):
reference_genome = default_reference_genome
if row_reference_genome is not None and row_reference_genome != '':
try:
reference_genome = ReferenceGenome[row_reference_genome.upper()]
except KeyError:
log.warning('Unexpected reference genome, only GRCh37 and GRCh38 are supported.' + (
' Use default.' if default_reference_genome is not None else ' Skipping.'))
return reference_genome
def processalterationevents(eventfile, outfile, previousoutfile, defaultCancerType, cancerTypeMap,
annotatehotspots, user_input_query_type, default_reference_genome):
if annotatehotspots:
init_3d_hotspots()
if os.path.isfile(previousoutfile):
cacheannotated(previousoutfile, defaultCancerType, cancerTypeMap)
outf = open(outfile, 'w+', 1000)
with open(eventfile, 'rU') as infile:
reader = csv.reader(infile, delimiter='\t')
headers = readheaders(reader)
ncols = headers["length"]
if ncols == 0:
return
newncols = 0
outf.write(headers['^-$'])
if annotatehotspots:
outf.write("\tIS-A-HOTSPOT")
outf.write("\tIS-A-3D-HOTSPOT")
newncols += 2
outf.write("\t" + GENE_IN_ONCOKB_HEADER)
outf.write("\t" + VARIANT_IN_ONCOKB_HEADER)
outf.write("\tMUTATION_EFFECT")
outf.write("\tMUTATION_EFFECT_CITATIONS")
outf.write("\tONCOGENIC")
newncols += 5
for l in levels:
outf.write('\t' + l)
newncols += len(levels)
outf.write("\tHIGHEST_LEVEL")
outf.write("\tTX_CITATIONS")
newncols += 2
for l in dxLevels:
outf.write('\t' + l)
newncols += len(dxLevels)
outf.write("\tHIGHEST_DX_LEVEL")
outf.write("\tDX_CITATIONS")
newncols += 2
for l in pxLevels:
outf.write('\t' + l)
newncols += len(pxLevels)
outf.write("\tHIGHEST_PX_LEVEL")
outf.write("\tPX_CITATIONS")
newncols += 2
outf.write("\n")
query_type = resolve_query_type(user_input_query_type, headers)
if (query_type == QueryType.HGVSP_SHORT):
process_alteration(reader, outf, headers, [HGVSP_SHORT_HEADER, ALTERATION_HEADER], ncols, newncols,
defaultCancerType,
cancerTypeMap, annotatehotspots, default_reference_genome)
if (query_type == QueryType.HGVSP):
process_alteration(reader, outf, headers, [HGVSP_HEADER, ALTERATION_HEADER], ncols, newncols, defaultCancerType,
cancerTypeMap, annotatehotspots, default_reference_genome)
if (query_type == QueryType.HGVSG):
process_hvsg(reader, outf, headers, [HGVSG_HEADER, ALTERATION_HEADER], ncols, newncols, defaultCancerType,
cancerTypeMap, annotatehotspots, default_reference_genome)
if (query_type == QueryType.GENOMIC_CHANGE):
process_genomic_change(reader, outf, headers, ncols, newncols, defaultCancerType, cancerTypeMap, annotatehotspots, default_reference_genome)
outf.close()
def get_cell_content(row, index, return_empty_string=False):
if index >= 0 and row[index] != 'NULL' and row[index] != '':
return row[index]
elif return_empty_string:
return ''
else:
return None
def process_alteration(maffilereader, outf, maf_headers, alteration_column_names, ncols, nannotationcols, defaultCancerType, cancerTypeMap,
annotatehotspots, default_reference_genome):
ihugo = geIndexOfHeader(maf_headers, HUGO_HEADERS)
iconsequence = geIndexOfHeader(maf_headers, CONSEQUENCE_HEADERS)
ihgvs = geIndexOfHeader(maf_headers, alteration_column_names)
isample = geIndexOfHeader(maf_headers, SAMPLE_HEADERS)
istart = geIndexOfHeader(maf_headers, PROTEIN_START_HEADERS)
iend = geIndexOfHeader(maf_headers, PROTEIN_END_HEADERS)
iproteinpos = geIndexOfHeader(maf_headers, PROTEIN_POSITION_HEADERS)
icancertype = geIndexOfHeader(maf_headers, CANCER_TYPE_HEADERS)
ireferencegenome= geIndexOfHeader(maf_headers, REFERENCE_GENOME_HEADERS)
posp = re.compile('[0-9]+')
i = 0
queries = []
rows = []
for row in maffilereader:
i = i + 1
if i % POST_QUERIES_THRESHOLD == 0:
log.info(i)
row = padrow(row, ncols)
sample = row[isample]
if sampleidsfilter and sample not in sampleidsfilter:
continue
hugo = row[ihugo]
consequence = get_cell_content(row, iconsequence)
if consequence in mutationtypeconsequencemap:
consequence = '%2B'.join(mutationtypeconsequencemap[consequence])
hgvs = row[ihgvs]
if hgvs.startswith('p.'):
hgvs = hgvs[2:]
cancertype = get_tumor_type_from_row(row, i, defaultCancerType, icancertype, cancerTypeMap, sample)
reference_genome = get_reference_genome_from_row(get_cell_content(row, ireferencegenome), default_reference_genome)
hgvs = conversion(hgvs)
start = get_cell_content(row, istart)
end = get_cell_content(row, iend)
if start is None and iproteinpos >= 0 and row[iproteinpos] != "" and row[iproteinpos] != "." and row[iproteinpos] != "-":
poss = row[iproteinpos].split('/')[0].split('-')
try:
if len(poss) > 0:
start = int(poss[0])
if len(poss) == 2:
end = int(poss[1])
except ValueError:
log.info("position wrong at line %s: %s" % (str(i), row[iproteinpos]))
if start is None and consequence == "missense_variant":
m = posp.search(hgvs)
if m:
start = m.group()
if start is not None and end is None:
end = start
query = ProteinChangeQuery(hugo, hgvs, cancertype, reference_genome, consequence, start, end)
queries.append(query)
rows.append(row)
if len(queries) == POST_QUERIES_THRESHOLD:
annotations = pull_protein_change_info(queries,annotatehotspots)
append_annotation_to_file(outf, ncols + nannotationcols, rows, annotations)
queries = []
rows = []
if len(queries) > 0:
annotations = pull_protein_change_info(queries,annotatehotspots)
append_annotation_to_file(outf, ncols + nannotationcols, rows, annotations)
# this method is from genome-nexus annotation-tools
# https://github.com/genome-nexus/annotation-tools/blob/53ff7f7fe673e961282f871ebc78d2ecc0831919/standardize_mutation_data.py
def get_var_allele(ref_allele, tumor_seq_allele1, tumor_seq_allele2):
# set the general tumor_seq_allele as the first non-ref allele encountered
# this will be used to resolve the variant classification and variant type
# if there are no tumor alleles that do not match the ref allele then use empty string
# in the event that this happens then there might be something wrong with the data itself
# if both alleles are different, use allele2. Stick with the logic of GenomeNexus
try:
tumor_seq_allele = ""
if ref_allele != tumor_seq_allele2:
tumor_seq_allele = tumor_seq_allele2
elif ref_allele != tumor_seq_allele1:
tumor_seq_allele = tumor_seq_allele1
except:
tumor_seq_allele = ""
return tumor_seq_allele
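# Illustrative calls with hypothetical alleles: ref 'A', allele1 'A', allele2 'T' -> 'T';
# ref 'A', allele1 'G', allele2 'A' -> 'G'; if both tumor alleles match the ref, '' is returned.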
def process_genomic_change(maffilereader, outf, maf_headers, ncols, nannotationcols, defaultCancerType, cancerTypeMap, annotatehotspots, default_reference_genome):
ichromosome = geIndexOfHeader(maf_headers, [GC_CHROMOSOME_HEADER])
istart = geIndexOfHeader(maf_headers, [GC_START_POSITION_HEADER])
iend = geIndexOfHeader(maf_headers, [GC_END_POSITION_HEADER])
irefallele = geIndexOfHeader(maf_headers, [GC_REF_ALLELE_HEADER])
ivarallele1 = geIndexOfHeader(maf_headers, [GC_VAR_ALLELE_1_HEADER])
ivarallele2 = geIndexOfHeader(maf_headers, [GC_VAR_ALLELE_2_HEADER])
isample = geIndexOfHeader(maf_headers, SAMPLE_HEADERS)
icancertype = geIndexOfHeader(maf_headers, CANCER_TYPE_HEADERS)
ireferencegenome= geIndexOfHeader(maf_headers, REFERENCE_GENOME_HEADERS)
posp = re.compile('[0-9]+')
i = 0
queries = []
rows = []
for row in maffilereader:
i = i + 1
if i % POST_QUERIES_THRESHOLD_GC_HGVSG == 0:
log.info(i)
row = padrow(row, ncols)
sample = row[isample]
if sampleidsfilter and sample not in sampleidsfilter:
continue
cancertype = get_tumor_type_from_row(row, i, defaultCancerType, icancertype, cancerTypeMap, sample)
reference_genome = get_reference_genome_from_row(get_cell_content(row, ireferencegenome), default_reference_genome)
chromosome = get_cell_content(row, ichromosome, True)
start = get_cell_content(row, istart, True)
end = get_cell_content(row, iend, True)
ref_allele = get_cell_content(row, irefallele, True)
var_allele_1 = get_cell_content(row, ivarallele1, True)
var_allele_2 = get_cell_content(row, ivarallele2, True)
var_allele = get_var_allele(ref_allele, var_allele_1, var_allele_2)
query = GenomicChangeQuery(chromosome, start, end, ref_allele, var_allele, cancertype, reference_genome)
queries.append(query)
rows.append(row)
if len(queries) == POST_QUERIES_THRESHOLD_GC_HGVSG:
annotations = pull_genomic_change_info(queries,annotatehotspots)
append_annotation_to_file(outf, ncols+nannotationcols, rows, annotations)
queries = []
rows = []
if len(queries) > 0:
annotations = pull_genomic_change_info(queries,annotatehotspots)
append_annotation_to_file(outf, ncols+nannotationcols, rows, annotations)
def process_hvsg(maffilereader, outf, maf_headers, alteration_column_names, ncols, nannotationcols, defaultCancerType, cancerTypeMap, annotatehotspots, default_reference_genome):
ihgvsg = geIndexOfHeader(maf_headers, alteration_column_names)
isample = geIndexOfHeader(maf_headers, SAMPLE_HEADERS)
icancertype = geIndexOfHeader(maf_headers, CANCER_TYPE_HEADERS)
ireferencegenome= geIndexOfHeader(maf_headers, REFERENCE_GENOME_HEADERS)
i = 0
queries = []
rows = []
for row in maffilereader:
i = i + 1
if i % POST_QUERIES_THRESHOLD_GC_HGVSG == 0:
log.info(i)
row = padrow(row, ncols)
sample = row[isample]
if sampleidsfilter and sample not in sampleidsfilter:
continue
hgvsg = get_cell_content(row, ihgvsg)
cancertype = get_tumor_type_from_row(row, i, defaultCancerType, icancertype, cancerTypeMap, sample)
reference_genome = get_reference_genome_from_row(get_cell_content(row, ireferencegenome), default_reference_genome)
if hgvsg is None:
if annotatehotspots:
default_cols = [['', '', GENE_IN_ONCOKB_DEFAULT, VARIANT_IN_ONCOKB_DEFAULT]]
else:
default_cols = [[GENE_IN_ONCOKB_DEFAULT, VARIANT_IN_ONCOKB_DEFAULT]]
append_annotation_to_file(outf, ncols + nannotationcols, [row],
default_cols)
else:
query = HGVSgQuery(hgvsg, cancertype, reference_genome)
queries.append(query)
rows.append(row)
if len(queries) == POST_QUERIES_THRESHOLD_GC_HGVSG:
annotations = pull_hgvsg_info(queries, annotatehotspots)
append_annotation_to_file(outf, ncols+nannotationcols, rows, annotations)
queries = []
rows = []
if len(queries) > 0:
annotations = pull_hgvsg_info(queries,annotatehotspots)
append_annotation_to_file(outf, ncols+nannotationcols, rows, annotations)
def getgenesfromfusion(fusion, nameregex=None):
GENES_REGEX = "([A-Za-z\d]+-[A-Za-z\d]+)" if nameregex is None else nameregex
searchresult = re.search(GENES_REGEX, fusion, flags=re.IGNORECASE)
gene1=None
gene2=None
if searchresult:
parts = searchresult.group(1).split("-")
gene1 = parts[0]
gene2 = gene1
if len(parts) > 1 and parts[1] != "intragenic":
gene2 = parts[1]
else:
gene1=gene2=fusion
return gene1, gene2
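# Illustrative parses of hypothetical inputs: 'BCR-ABL1 Fusion' -> ('BCR', 'ABL1');
# 'EGFR-intragenic' -> ('EGFR', 'EGFR'); a value with no 'GENE1-GENE2' pattern is returned as both genes.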
def processsv(svdata, outfile, previousoutfile, defaultCancerType, cancerTypeMap, nameregex):
if os.path.isfile(previousoutfile):
cacheannotated(previousoutfile, defaultCancerType, cancerTypeMap)
outf = open(outfile, 'w+')
with open(svdata, 'rU') as infile:
reader = csv.reader(infile, delimiter='\t')
headers = readheaders(reader)
ncols = headers["length"]
if ncols == 0:
return
outf.write(headers['^-$'])
outf.write("\t" + GENE_IN_ONCOKB_HEADER)
outf.write("\t" + VARIANT_IN_ONCOKB_HEADER)
outf.write("\tMUTATION_EFFECT")
outf.write("\tMUTATION_EFFECT_CITATIONS")
outf.write("\tONCOGENIC")
for l in levels:
outf.write('\t' + l)
outf.write("\tHIGHEST_LEVEL")
outf.write("\tTX_CITATIONS")
for l in dxLevels:
outf.write('\t' + l)
outf.write("\tHIGHEST_DX_LEVEL")
outf.write("\tDX_CITATIONS")
for l in pxLevels:
outf.write('\t' + l)
outf.write("\tHIGHEST_PX_LEVEL")
outf.write("\tPX_CITATIONS")
outf.write("\n")
newcols = ncols + 11 + len(levels) + len(dxLevels) + len(pxLevels)
igene1 = geIndexOfHeader(headers, ['GENE1'])
igene2 = geIndexOfHeader(headers, ['GENE2'])
ifusion = geIndexOfHeader(headers, FUSION_HEADERS)
isample = geIndexOfHeader(headers, SAMPLE_HEADERS)
icancertype = geIndexOfHeader(headers, CANCER_TYPE_HEADERS)
i = 0
queries = []
rows = []
for row in reader:
i = i + 1
if i % POST_QUERIES_THRESHOLD == 0:
log.info(i)
row = padrow(row, ncols)
sample = row[isample]
if sampleidsfilter and sample not in sampleidsfilter:
continue
gene1 = None
gene2 = None
if igene1 >= 0:
gene1 = row[igene1]
if igene2 >= 0:
gene2 = row[igene2]
if igene1 < 0 and igene2 < 0 and ifusion >= 0:
fusion = row[ifusion]
gene1, gene2 = getgenesfromfusion(fusion, nameregex)
cancertype = get_tumor_type_from_row(row, i, defaultCancerType, icancertype, cancerTypeMap, sample)
queries.append(StructuralVariantQuery(gene1, gene2, 'FUSION', cancertype))
rows.append(row)
if len(queries) == POST_QUERIES_THRESHOLD:
annotations = pull_structural_variant_info(queries)
append_annotation_to_file(outf, newcols, rows, annotations)
queries = []
rows = []
if len(queries) > 0:
annotations = pull_structural_variant_info(queries)
append_annotation_to_file(outf, newcols, rows, annotations)
outf.close()
def processcnagisticdata(cnafile, outfile, previousoutfile, defaultCancerType, cancerTypeMap, annotate_gain_loss=False):
CNA_AMPLIFICATION_TXT = 'Amplification'
CNA_DELETION_TXT = 'Deletion'
CNA_LOSS_TXT = 'Loss'
CNA_GAIN_TXT = 'Gain'
cnaEventMap = {
"-2": CNA_DELETION_TXT,
"-1.5": CNA_DELETION_TXT,
"2": CNA_AMPLIFICATION_TXT
}
if annotate_gain_loss:
cnaEventMap.update({
"-1": CNA_LOSS_TXT,
"1": CNA_GAIN_TXT
})
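    # GISTIC-style discrete calls: -2 / -1.5 -> Deletion and 2 -> Amplification; the shallow
    # -1 (Loss) and 1 (Gain) calls are only annotated when annotate_gain_loss is True.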
if os.path.isfile(previousoutfile):
cacheannotated(previousoutfile, defaultCancerType, cancerTypeMap)
outf = open(outfile, 'w+', 1000)
with open(cnafile, 'rU') as infile:
reader = csv.reader(infile, delimiter='\t')
headers = readheaders(reader)
samples = []
rawsamples = []
if headers["length"] != 0:
startofsamples = getfirstcolumnofsampleingisticdata(headers['^-$'].split('\t'))
rawsamples = headers['^-$'].split('\t')[startofsamples:]
for rs in rawsamples:
samples.append(rs)
if defaultCancerType == '' and not set(cancerTypeMap.keys()).issuperset(set(samples)):
log.info(
"Cancer type for all samples should be defined for a more accurate result\nsamples in cna file: %s\n" % (
samples))
outf.write('SAMPLE_ID\tCANCER_TYPE\tHUGO_SYMBOL\tALTERATION')
outf.write("\t"+GENE_IN_ONCOKB_HEADER)
outf.write("\t"+VARIANT_IN_ONCOKB_HEADER)
outf.write("\tMUTATION_EFFECT")
outf.write("\tMUTATION_EFFECT_CITATIONS")
outf.write("\tONCOGENIC")
for l in levels:
outf.write('\t' + l)
outf.write("\tHIGHEST_LEVEL")
outf.write("\tTX_CITATIONS")
for l in dxLevels:
outf.write('\t' + l)
outf.write("\tHIGHEST_DX_LEVEL")
outf.write("\tDX_CITATIONS")
for l in pxLevels:
outf.write('\t' + l)
outf.write("\tHIGHEST_PX_LEVEL")
outf.write("\tPX_CITATIONS")
outf.write("\n")
ncols = 15 + len(levels) + len(dxLevels) + len(pxLevels)
i = 0
rows = []
queries = []
for row in reader:
i = i + 1
if i % POST_QUERIES_THRESHOLD == 0:
log.info(i)
hugo = row[0]
if len(row) == 1:
log.warning("No CNA specified for gene " + hugo)
continue
for rawsample in rawsamples:
if rawsample in headers:
if len(row) <= headers[rawsample]:
log.warning('No CNA specified for ' + row[0] + ' ' + rawsample)
continue
cna = row[headers[rawsample]]
if cna in cnaEventMap:
cna_type = cnaEventMap[cna]
if cna_type is not None:
cancertype = defaultCancerType
sample = rawsample
if sampleidsfilter and sample not in sampleidsfilter:
continue
if sample in cancerTypeMap:
cancertype = cancerTypeMap[sample]
rows.append([sample, cancertype, hugo, cna_type])
queries.append(CNAQuery(hugo, cna_type, cancertype))
if len(queries) == POST_QUERIES_THRESHOLD:
annotations = pull_cna_info(queries)
append_annotation_to_file(outf, ncols, rows, annotations)
rows = []
queries = []
if len(queries) > 0:
annotations = pull_cna_info(queries)
append_annotation_to_file(outf, ncols, rows, annotations)
outf.close()
def getfirstcolumnofsampleingisticdata(headers):
header0 = headers[0].lower()
if header0 != "hugo_symbol" and header0 != "gene symbol":
log.info("Gistic data should start with Hugo_Symbol")
quit()
header1 = headers[1].lower()
if header1 != "entrez_gene_id" and header1 != "locus id":
return 1
header2 = headers[2].lower()
if header2 != "cytoband":
return 2
return 3
def file_len(fname):
with open(fname) as f:
i = -1
for i, l in enumerate(f):
pass
return i + 1
def processclinicaldata(annotatedmutfiles, clinicalfile, outfile):
samplelevels = {}
sampledxlevels = {}
samplepxlevels = {}
sampleleveltreatments = {}
sampledrivers = {}
samplemutationswithdiagnosis = {}
samplemutationswithprognosis = {}
sampleactionablecount = {}
samplealterationcount = {}
for annotatedmutfile in annotatedmutfiles:
with open(annotatedmutfile, 'rU') as mutfile:
reader = csv.reader(mutfile, delimiter='\t')
headers = readheaders(reader)
ncols = headers["length"]
if ncols == 0:
return
igene1 = geIndexOfHeader(headers, ['GENE1'] + HUGO_HEADERS) # fusion
igene2 = geIndexOfHeader(headers, ['GENE2'] + HUGO_HEADERS) # fusion
ifusion = geIndexOfHeader(headers, ['FUSION'])
ihugo = geIndexOfHeader(headers, HUGO_HEADERS)
iconsequence = geIndexOfHeader(headers, CONSEQUENCE_HEADERS)
ihgvs = geIndexOfHeader(headers, HGVS_HEADERS)
isample = geIndexOfHeader(headers, SAMPLE_HEADERS)
istart = geIndexOfHeader(headers, PROTEIN_START_HEADERS)
iend = geIndexOfHeader(headers, PROTEIN_END_HEADERS)
icancertype = geIndexOfHeader(headers, CANCER_TYPE_HEADERS)
# imutationeffect = headers['MUTATION_EFFECT']
ioncogenic = headers['ONCOGENIC']
isfusion = (igene1 != -1 and igene2 != -1) or ifusion != -1
ismutorcna = ihugo != -1 and ihgvs != -1
if not isfusion and not ismutorcna:
log.error("missing proper header")
exit()
for row in reader:
row = padrow(row, ncols)
sample = row[isample]
oncogenic = ""
if ioncogenic < len(row):
oncogenic = row[ioncogenic].lower()
if sample not in samplelevels:
samplelevels[sample] = {}
sampledxlevels[sample] = []
samplepxlevels[sample] = []
sampleleveltreatments[sample] = {}
sampledrivers[sample] = []
sampleactionablecount[sample] = {}
if sample not in samplemutationswithdiagnosis:
samplemutationswithdiagnosis[sample] = []
if sample not in samplemutationswithprognosis:
samplemutationswithprognosis[sample] = []
if sample not in samplealterationcount:
samplealterationcount[sample] = 1
else:
samplealterationcount[sample] += 1
hugo = row[ihugo]
alteration = row[ihgvs]
gene1 = row[igene1]
gene2 = row[igene2]
variant = "NA"
if ismutorcna:
variant = hugo + " " + alteration
elif isfusion:
if ifusion != -1:
variant = row[ifusion]
else:
if gene1 == gene2:
variant = gene1 + " intragenic deletion"
else:
variant = gene1 + "-" + gene2 + " fusion"
if oncogenic == "oncogenic" or oncogenic == "likely oncogenic" or oncogenic == "predicted oncogenic":
sampledrivers[sample].append(variant)
for l in levels:
il = headers[l]
if il < len(row) and row[il] != '':
if l not in samplelevels[sample]:
samplelevels[sample][l] = []
sampleleveltreatments[sample][l] = []
samplelevels[sample][l].append(row[il] + "(" + variant + ")")
sampleleveltreatments[sample][l].extend(row[il].split(","))
if not l.startswith('LEVEL_R'):
sampleactionablecount[sample][variant] = True
for l in dxLevels:
il = headers[l]
if il < len(row) and row[il] != '':
if l not in samplelevels[sample]:
samplelevels[sample][l] = []
samplelevels[sample][l].append(row[il] + "(" + variant + ")")
for l in pxLevels:
il = headers[l]
if il < len(row) and row[il] != '':
if l not in samplelevels[sample]:
samplelevels[sample][l] = []
samplelevels[sample][l].append(row[il] + "(" + variant + ")")
ihighestdxlevel = geIndexOfHeader(headers, ['HIGHEST_DX_LEVEL'])
if ihighestdxlevel != -1:
if row[ihighestdxlevel] != '':
samplemutationswithdiagnosis[sample].append(variant)
sampledxlevels[sample].append(row[ihighestdxlevel])
ihighestpxlevel = geIndexOfHeader(headers, ['HIGHEST_PX_LEVEL'])
if ihighestpxlevel != -1:
if row[ihighestpxlevel] != '':
samplemutationswithprognosis[sample].append(variant)
samplepxlevels[sample].append(row[ihighestpxlevel])
outf = open(outfile, 'w+')
# export to annotated file
with open(clinicalfile, 'rU') as clinfile:
reader = csv.reader(clinfile, delimiter='\t')
headers = readheaders(reader)
outf.write(headers['^-$'])
for l in levels:
outf.write('\t' + l)
outf.write('\tHIGHEST_LEVEL')
for l in dxLevels:
outf.write('\t' + l)
outf.write('\tHIGHEST_DX_LEVEL')
for l in pxLevels:
outf.write('\t' + l)
outf.write('\tHIGHEST_PX_LEVEL')
outf.write('\tONCOGENIC_MUTATIONS\t#ONCOGENIC_MUTATIONS\t#MUTATIONS_WITH_THERAPEUTIC_IMPLICATIONS\t#MUTATIONS_WITH_DIAGNOSTIC_IMPLICATIONS\t#MUTATIONS_WITH_PROGNOSTIC_IMPLICATIONS\t#MUTATIONS\n')
isample = headers['SAMPLE_ID']
for row in reader:
sample = row[isample]
if sampleidsfilter and sample not in sampleidsfilter:
continue
outf.write('\t'.join(row))
for l in levels:
outf.write('\t')
if sample in samplelevels and l in samplelevels[sample]:
outf.write(";".join(samplelevels[sample][l]))
highestlevel = ''
highestdxlevel = ''
highestpxlevel = ''
if sample in sampleleveltreatments:
highestlevel = gethighestsensitivitylevel(sampleleveltreatments[sample])
if sample in sampledxlevels:
highestdxlevel = gethighestDxPxlevel(dxLevels, sampledxlevels[sample])
if sample in samplepxlevels:
highestpxlevel = gethighestDxPxlevel(pxLevels, samplepxlevels[sample])
# if highestlevel == '':
# if sample in sampledrivers and len(sampledrivers[sample])>0:
# highestlevel = 'Oncogenic, no level'
# else:
# highestlevel = "VUS"
outf.write('\t' + highestlevel)
for l in dxLevels:
outf.write('\t')
if sample in samplelevels and l in samplelevels[sample]:
outf.write(";".join(samplelevels[sample][l]))
outf.write('\t' + highestdxlevel)
for l in pxLevels:
outf.write('\t')
if sample in samplelevels and l in samplelevels[sample]:
outf.write(";".join(samplelevels[sample][l]))
outf.write('\t' + highestpxlevel)
actionablecount = 0
if sample in sampleactionablecount:
actionablecount = len(sampleactionablecount[sample].keys())
alterationcount = 0
if sample in samplealterationcount:
alterationcount = samplealterationcount[sample]
drivercount = 0
diagnosiscount = 0
prognosiscount = 0
drivermutations = ""
if sample in sampledrivers:
drivercount = len(sampledrivers[sample])
drivermutations = ";".join(sampledrivers[sample])
if sample in samplemutationswithdiagnosis:
diagnosiscount = len(samplemutationswithdiagnosis[sample])
if sample in samplemutationswithprognosis:
prognosiscount = len(samplemutationswithprognosis[sample])
outf.write('\t' + drivermutations)
outf.write('\t' + str(drivercount))
outf.write('\t' + str(actionablecount))
outf.write('\t' + str(diagnosiscount))
outf.write('\t' + str(prognosiscount))
outf.write('\t' + str(alterationcount))
outf.write('\n')
outf.close()
def plotclinicalactionability(ax, annotatedclinicalfile, outfile, parameters):
if os.path.isfile(outfile):
os.remove(outfile)
extlevels = levels + ["ONCOGENIC", "VUS"]
if "levels" in parameters:
extlevels = parameters["levels"]
with open(annotatedclinicalfile, 'rU') as clinfile:
reader = csv.reader(clinfile, delimiter='\t')
headers = readheaders(reader)
isample = geIndexOfHeader(headers, SAMPLE_HEADERS)
ilevel = headers['HIGHEST_LEVEL']
ioncogenic = headers['ONCOGENIC_MUTATIONS']
icat = headers[parameters["catogerycolumn"].upper()] #e.g. "CANCER_TYPE"
catsamplecount = {}
catactionablesamplecount = {}
oncogenicsamplecount = {}
levelcatsamplecount = {}
for row in reader:
sample = row[isample]
if sampleidsfilter and sample not in sampleidsfilter:
continue
cat = row[icat]
if cat not in catsamplecount:
catsamplecount[cat] = 0
catsamplecount[cat] += 1
if cat not in catactionablesamplecount:
catactionablesamplecount[cat] = 0
oncogenicsamplecount[cat] = 0
level = row[ilevel]
oncogenic = row[ioncogenic]
exlevel = level
if level in extlevels:
catactionablesamplecount[cat] += 1
oncogenicsamplecount[cat] += 1
elif len(oncogenic.strip()) > 0:
oncogenicsamplecount[cat] += 1
exlevel = "ONCOGENIC"
else:
exlevel = "VUS"
if exlevel not in levelcatsamplecount:
levelcatsamplecount[exlevel] = {}
if cat not in levelcatsamplecount[exlevel]:
levelcatsamplecount[exlevel][cat] = 0
levelcatsamplecount[exlevel][cat] += 1
# plot
catarray = [] # cancer types
catactionabilityarray = [] # actionability percentages per cancer type
catoncogenicarray = [] # oncogenic percentages per cancer type
for cat in catsamplecount:
if catsamplecount[cat] >= parameters["thresholdcat"]:
catarray.append(cat)
catactionabilityarray.append(catactionablesamplecount[cat] * 100.0 / catsamplecount[cat])
catoncogenicarray.append(oncogenicsamplecount[cat] * 100.0 / catsamplecount[cat])
ncat = len(catarray)
order = reversed(sorted(range(ncat),key=lambda x:(catactionabilityarray[x],catoncogenicarray[x])))
drawplot(ax, 'OncoKB Actionability', extlevels, levelcatsamplecount, catarray, catsamplecount, order, parameters["thresholdcat"])
def plotimplications(ax, header, title, levels, annotatedclinicalfile, outfile, parameters):
if os.path.isfile(outfile):
os.remove(outfile)
extlevels = levels
if "levels" in parameters:
extlevels = parameters["levels"]
with open(annotatedclinicalfile, 'rU') as clinfile:
reader = csv.reader(clinfile, delimiter='\t')
headers = readheaders(reader)
isample = headers['SAMPLE_ID']
ilevel = headers[header]
icat = headers[parameters["catogerycolumn"].upper()]
catsamplecount = {}
catactionablesamplecount = {}
levelcatsamplecount = {}
for row in reader:
sample = row[isample]
if sampleidsfilter and sample not in sampleidsfilter:
continue
cat = row[icat]
if cat not in catsamplecount:
catsamplecount[cat] = 0
catsamplecount[cat] += 1
if cat not in catactionablesamplecount:
catactionablesamplecount[cat] = 0
level = row[ilevel]
exlevel = level
if level in extlevels:
catactionablesamplecount[cat] += 1
else:
exlevel = "Other"
if exlevel not in levelcatsamplecount:
levelcatsamplecount[exlevel] = {}
if cat not in levelcatsamplecount[exlevel]:
levelcatsamplecount[exlevel][cat] = 0
levelcatsamplecount[exlevel][cat] += 1
# plot
catarray = [] # cancer types
catactionabilityarray = [] # actionability percentages per cancer type
for cat in catsamplecount:
if catsamplecount[cat] >= parameters["thresholdcat"]:
catarray.append(cat)
catactionabilityarray.append(catactionablesamplecount[cat] * 100.0 / catsamplecount[cat])
ncat = len(catarray)
order = reversed(sorted(range(ncat),key=lambda x:(catactionabilityarray[x])))
drawplot(ax, title, extlevels, levelcatsamplecount, catarray, catsamplecount, order, parameters["thresholdcat"])
def drawplot(ax, title, extlevels, levelcatsamplecount, catarray, catsamplecount, order, thresholdcat):
# level colors
levelcolors = {
'LEVEL_1': '#33A02C',
'LEVEL_2': '#1F78B4',
'LEVEL_3A': '#984EA3',
'LEVEL_3B': '#BE98CE',
'LEVEL_4': '#a8a8a8',
'LEVEL_R1': '#EE3424',
'LEVEL_R2': '#F79A92',
'LEVEL_R3': '#FCD6D3',
'LEVEL_Dx1': '#33A02C',
'LEVEL_Dx2': '#1F78B4',
'LEVEL_Dx3': '#984EA3',
'LEVEL_Px1': '#33A02C',
'LEVEL_Px2': '#1F78B4',
'LEVEL_Px3': '#984EA3',
'ONCOGENIC': '#ffdab9',
'VUS': '#d1d1d1',
'Other': 'grey'
}
# level legend
levellegend = {
'LEVEL_1': 'Level 1',
'LEVEL_2': 'Level 2',
'LEVEL_3A': 'Level 3A',
'LEVEL_3B': 'Level 3B',
'LEVEL_4': 'Level 4',
'LEVEL_R1': 'Level R1',
'LEVEL_R2': 'Level R2',
'LEVEL_R3': 'Level R3',
'LEVEL_Dx1': 'Level Dx1',
'LEVEL_Dx2': 'Level Dx2',
'LEVEL_Dx3': 'Level Dx3',
'LEVEL_Px1': 'Level Px1',
'LEVEL_Px2': 'Level Px2',
'LEVEL_Px3': 'Level Px3',
'ONCOGENIC': 'Oncogenic, no level',
'VUS': 'VUS',
'Other': 'Other'
}
ncat = len(catarray)
if ncat > 0:
catarray = [catarray[i] for i in order]
ind = range(ncat)
legends = []
plts = []
accumlevelcancerperc = [0] * ncat
for level in extlevels:
if level not in levelcatsamplecount:
continue
levelcancerperc = [0] * ncat
for k in ind:
cat = catarray[k]
if catsamplecount[cat] < thresholdcat:
continue
if cat in levelcatsamplecount[level]:
levelcancerperc[k] = levelcatsamplecount[level][cat] * 100.0 / catsamplecount[cat]
width = 0.75
plts = [ax.bar(ind, levelcancerperc, width, color=levelcolors[level], bottom=accumlevelcancerperc)] + plts
legends = [levellegend[level]] + legends
accumlevelcancerperc = list(map(sum, zip(accumlevelcancerperc,levelcancerperc)))
ax = plt.gca()
ax.set_axisbelow(True)
ax.set_aspect(0.1)
ax.tick_params(axis='y', which='major', labelsize=6)
ax.set_ylabel('% of samples', fontsize=6)
ax.set_title(title, fontsize=8)
ax.set_xticks([i+0.5 for i in ind])
ax.set_xticklabels(catarray, rotation=60, ha="right", fontsize=4)
# plt.yticks(np.arange(0, 81, 10))
ax.legend(plts, legends, fontsize=6, bbox_to_anchor=(1.01, 1), loc="upper left")
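# Note on drawplot's stacking above: each level contributes one bar segment per cancer
# type, and bottom=accumlevelcancerperc stacks the segments so that a bar's total height
# is the cumulative percentage of samples across the plotted levels for that category.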
oncokbcache = {}
def cacheannotated(annotatedfile, defaultCancerType, cancerTypeMap):
with open(annotatedfile, 'rU') as infile:
try:
reader = csv.reader(infile, delimiter='\t')
headers = readheaders(reader)
ihugo = geIndexOfHeader(headers, HUGO_HEADERS)
iconsequence = geIndexOfHeader(headers, CONSEQUENCE_HEADERS)
ihgvs = geIndexOfHeader(headers, HGVS_HEADERS)
isample = geIndexOfHeader(headers, SAMPLE_HEADERS)
istart = geIndexOfHeader(headers, PROTEIN_START_HEADERS)
iend = geIndexOfHeader(headers, PROTEIN_END_HEADERS)
icancertype = geIndexOfHeader(headers, CANCER_TYPE_HEADERS)
imutationeffect = headers['MUTATION_EFFECT']
icitations = headers['CITATIONS']
ioncogenic = headers['ONCOGENIC']
igeneannotated = headers[GENE_IN_ONCOKB_HEADER]
ivariantannotated = headers[VARIANT_IN_ONCOKB_HEADER]
for row in reader:
try:
hugo = row[ihugo]
hgvs = row[ihgvs]
if hgvs.startswith('p.'):
hgvs = hgvs[2:]
sample = row[isample]
cancertype = defaultCancerType
if icancertype >= 0:
cancertype = row[icancertype]
if sample in cancerTypeMap:
cancertype = cancerTypeMap[sample]
key = '-'.join([hugo, hgvs, cancertype])
# oncokb = row[ioncokb]
oncokbcache[key] = {}
oncokbcache[key][GENE_IN_ONCOKB_HEADER] = row[igeneannotated]
oncokbcache[key][VARIANT_IN_ONCOKB_HEADER] = row[ivariantannotated]
oncokbcache[key]['mutation_effect'] = row[imutationeffect]
oncokbcache[key]['citations'] = row[icitations]
oncokbcache[key]['oncogenic'] = row[ioncogenic]
for l in levels:
il = headers[l]
if il < len(row):
oncokbcache[key][l] = row[il].split(',')
else:
oncokbcache[key][l] = []
except Exception:
pass
except Exception:
pass
def geIndexOfHeader(headers, keywords):
for k in keywords:
if k in headers:
return headers[k]
return -1
def pull3dhotspots(hugo, consequence, start, end):
try:
if hugo in _3dhotspots and consequence == "missense_variant":
for i in range(int(start), int(end) + 1):
if i in _3dhotspots[hugo]:
return "Y"
except TypeError:
log.error("%s: %s-%s" % (hugo, str(start), str(end)))
return ""
def appendoncokbcitations(citations, pmids, abstracts):
if citations is None:
citations = []
if pmids is not None:
for pmid in pmids:
if pmid not in citations:
citations.append(pmid)
if abstracts is not None:
for abstract in abstracts:
abstractStr = abstract['abstract'] + '(' + abstract['link'] + ')'
if abstractStr not in citations:
citations.append(abstractStr)
return citations
class Gene:
def __init__(self, hugo):
self.hugoSymbol = hugo
class ProteinChangeQuery:
def __init__(self, hugo, hgvs, cancertype, reference_genome=None, consequence=None, start=None, end=None):
self.gene = Gene(hugo)
self.alteration = hgvs
if consequence is not None:
self.consequence = consequence
if start is not None:
self.proteinStart = start
if end is not None:
self.proteinEnd = end
self.tumorType = cancertype
if reference_genome is not None:
self.referenceGenome = reference_genome.value
class HGVSgQuery:
def __init__(self, hgvsg, cancertype, reference_genome=None):
self.hgvsg = hgvsg
self.tumorType = cancertype
if reference_genome is not None:
self.referenceGenome = reference_genome.value
def gettumortypename(tumortype):
if 'code' in tumortype and tumortype['code'] is not None and tumortype['code'] != '':
return tumortype['code']
elif 'name' in tumortype and tumortype['name'] is not None and tumortype['name'] != '':
return tumortype['name']
else:
return tumortype['mainType']['name']
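# Illustrative note on the precedence above (assumed payloads): a tumorType of
# {"code": "LUAD", "name": "Lung Adenocarcinoma"} resolves to "LUAD"; one with an
# empty code resolves to its name; one with neither falls back to mainType["name"].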
def getimplications(oncokbdata, implication_type, levels, implications):
citation_column_key = implication_type + '_citations'
for implication in implications:
oncokbdata[citation_column_key] = appendoncokbcitations(oncokbdata[citation_column_key], implication['pmids'],
implication['abstracts'])
level = implication['levelOfEvidence']
if level is not None:
if level not in levels:
log.info(level + " is ignored")
else:
if 'tumorType' in implication:
tumortypename = gettumortypename(implication['tumorType'])
if tumortypename not in oncokbdata[level]:
oncokbdata[level].append(tumortypename)
class GenomicChangeQuery:
def __init__(self, chromosome, start, end, ref_allele, var_allele, cancertype, reference_genome=None):
self.genomicLocation = ','.join([chromosome, start, end, ref_allele, var_allele])
self.tumorType = cancertype
if reference_genome is not None:
self.referenceGenome = reference_genome.value
class CNAQuery:
def __init__(self, hugo, cnatype, cancertype):
self.gene = Gene(hugo)
self.copyNameAlterationType = cnatype.upper()
self.tumorType = cancertype
class StructuralVariantQuery:
def __init__(self, hugoA, hugoB, structural_variant_type, cancertype):
# Assume all structural variants in the file are functional fusions
is_functional_fusion = True
if hugoA == hugoB:
is_functional_fusion = False
structural_variant_type = 'DELETION'
self.geneA = Gene(hugoA)
self.geneB = Gene(hugoB)
self.functionalFusion = is_functional_fusion
self.structuralVariantType = structural_variant_type.upper()
self.tumorType = cancertype
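# Hedged example of the query classes above (values are assumptions for illustration):
# StructuralVariantQuery("TMPRSS2", "ERG", "FUSION", "Prostate Cancer") is posted as a
# functional FUSION, while StructuralVariantQuery("EGFR", "EGFR", "FUSION", "GBM") is
# downgraded to a non-functional DELETION because both genes are identical.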
def pull_protein_change_info(queries, annotate_hotspot):
url = oncokbapiurl + '/annotate/mutations/byProteinChange'
response = makeoncokbpostrequest(url, queries)
if response.status_code == 401:
raise Exception('unauthorized')
annotation = []
if response.status_code == 200:
annotation = response.json()
else:
for query in queries:
geturl = url + '?'
geturl += 'hugoSymbol=' + query.gene.hugoSymbol
geturl += '&alteration=' + query.alteration
geturl += '&tumorType=' + query.tumorType
if hasattr(query, 'consequence') and query.consequence:
geturl += '&consequence=' + query.consequence
if hasattr(query, 'proteinStart') and query.proteinStart and query.proteinStart != '\\N' and query.proteinStart != 'NULL' and query.proteinStart != '':
geturl += '&proteinStart=' + str(query.proteinStart)
if hasattr(query, 'proteinEnd') and query.proteinEnd and query.proteinEnd != '\\N' and query.proteinEnd != 'NULL' and query.proteinEnd != '':
geturl += '&proteinEnd=' + str(query.proteinEnd)
getresponse = makeoncokbgetrequest(geturl)
if getresponse.status_code == 200:
annotation.append(getresponse.json())
else:
# if the api call fails, we should still push a None into the list
# to keep the same length of the queries
annotation.append(None)
processed_annotation = []
for query_annotation in annotation:
processed_annotation.append(process_oncokb_annotation(query_annotation, annotate_hotspot))
return processed_annotation
def pull_hgvsg_info(queries, annotate_hotspot):
url = oncokbapiurl + '/annotate/mutations/byHGVSg'
response = makeoncokbpostrequest(url, queries)
if response.status_code == 401:
raise Exception('unauthorized')
annotation = []
if response.status_code == 200:
annotation = response.json()
else:
for query in queries:
geturl = url + '?'
geturl += 'hgvsg=' + query.hgvsg
geturl += '&tumorType=' + query.tumorType
getresponse = makeoncokbgetrequest(geturl)
if getresponse.status_code == 200:
annotation.append(getresponse.json())
else:
# if the api call fails, we should still push a None into the list
# to keep the same length of the queries
print('Error on annotating the url ' + geturl)
annotation.append(None)
processed_annotation = []
for query_annotation in annotation:
processed_annotation.append(process_oncokb_annotation(query_annotation, annotate_hotspot))
return processed_annotation
def pull_genomic_change_info(queries, annotate_hotspot):
url = oncokbapiurl + '/annotate/mutations/byGenomicChange'
response = makeoncokbpostrequest(url, queries)
if response.status_code == 401:
raise Exception('unauthorized')
annotation = []
if response.status_code == 200:
annotation = response.json()
else:
for query in queries:
geturl = url + '?'
geturl += 'genomicLocation=' + query.genomicLocation
geturl += '&tumorType=' + query.tumorType
getresponse = makeoncokbgetrequest(geturl)
if getresponse.status_code == 200:
annotation.append(getresponse.json())
else:
# if the api call fails, we should still push a None into the list
# to keep the same length of the queries
print('Error on annotating the url ' + geturl)
annotation.append(None)
processed_annotation = []
for query_annotation in annotation:
processed_annotation.append(process_oncokb_annotation(query_annotation, annotate_hotspot))
return processed_annotation
def pull_cna_info(queries):
url = oncokbapiurl + '/annotate/copyNumberAlterations'
response = makeoncokbpostrequest(url, queries)
if response.status_code == 401:
raise Exception('unauthorized')
annotation = []
if response.status_code == 200:
annotation = response.json()
else:
for query in queries:
geturl = url + '?'
geturl += 'hugoSymbol=' + query.gene.hugoSymbol
geturl += '&copyNameAlterationType=' + query.copyNameAlterationType
geturl += '&tumorType=' + query.tumorType
getresponse = makeoncokbgetrequest(geturl)
if getresponse.status_code == 200:
annotation.append(getresponse.json())
else:
# if the api call fails, we should still push a None into the list
# to keep the same length of the queries
print('Error on annotating the url ' + geturl)
annotation.append(None)
processed_annotation = []
for query_annotation in annotation:
processed_annotation.append(process_oncokb_annotation(query_annotation, annotate_hotspot=False))
return processed_annotation
def pull_structural_variant_info(queries):
url = oncokbapiurl + '/annotate/structuralVariants'
response = makeoncokbpostrequest(url, queries)
if response.status_code == 401:
raise Exception('unauthorized')
annotation = []
if response.status_code == 200:
annotation = response.json()
else:
for query in queries:
geturl = url + '?'
geturl += 'hugoSymbolA=' + query.geneA.hugoSymbol
geturl += '&hugoSymbolB=' + query.geneB.hugoSymbol
geturl += '&structuralVariantType=' + query.structuralVariantType
geturl += '&isFunctionalFusion=' + (str(query.functionalFusion).upper() if type(query.functionalFusion) is bool else query.functionalFusion)
geturl += '&tumorType=' + query.tumorType
getresponse = makeoncokbgetrequest(geturl)
if getresponse.status_code == 200:
annotation.append(getresponse.json())
else:
# if the api call fails, we should still push a None into the list
# to keep the same length of the queries
print('Error on annotating the url ' + geturl)
annotation.append(None)
processed_annotation = []
for query_annotation in annotation:
processed_annotation.append(process_oncokb_annotation(query_annotation, annotate_hotspot=False))
return processed_annotation
def process_oncokb_annotation(annotation, annotate_hotspot):
if annotation is None:
return None
oncokbdata = {}
for l in levels:
oncokbdata[l] = []
for l in dxLevels:
oncokbdata[l] = []
for l in pxLevels:
oncokbdata[l] = []
oncokbdata[GENE_IN_ONCOKB_HEADER] = GENE_IN_ONCOKB_DEFAULT
oncokbdata[VARIANT_IN_ONCOKB_HEADER] = VARIANT_IN_ONCOKB_DEFAULT
oncokbdata['mutation_effect'] = ""
oncokbdata['mutation_effect_citations'] = []
oncokbdata['citations'] = []
oncokbdata['oncogenic'] = ""
oncokbdata['tx_citations'] = []
oncokbdata['dx_citations'] = []
oncokbdata['px_citations'] = []
try:
# gene / variant presence in OncoKB
oncokbdata[GENE_IN_ONCOKB_HEADER] = GENE_IN_ONCOKB_DEFAULT if annotation['geneExist'] is None else str(annotation['geneExist'])
oncokbdata[VARIANT_IN_ONCOKB_HEADER] = VARIANT_IN_ONCOKB_DEFAULT if annotation['variantExist'] is None else str(annotation['variantExist'])
# oncogenic
oncokbdata['oncogenic'] = annotation['oncogenic']
# if not evidences['geneExist'] or (not evidences['variantExist'] and not evidences['alleleExist']):
# return ''
# mutation effect
if (annotation['mutationEffect'] is not None):
oncokbdata['mutation_effect'] = annotation['mutationEffect']['knownEffect']
oncokbdata['mutation_effect_citations'] = appendoncokbcitations(oncokbdata['mutation_effect_citations'],
annotation['mutationEffect']['citations']['pmids'],
annotation['mutationEffect']['citations']['abstracts'])
# oncogenic
oncokbdata['oncogenic'] = annotation['oncogenic']
# get treatment
for treatment in annotation['treatments']:
level = treatment['level']
if level not in levels:
log.info("%s is ignored" % level)
# oncokbdata[level].append('')
else:
drugs = treatment['drugs']
oncokbdata['tx_citations'] = appendoncokbcitations(oncokbdata['tx_citations'], treatment['pmids'],
treatment['abstracts'])
if len(drugs) == 0:
oncokbdata[level].append('[NOT SPECIFIED]')
else:
drugnames = []
for drug in drugs:
drugnames.append(drug['drugName'])
treatmentname = '+'.join(drugnames)
if treatmentname not in oncokbdata[level]:
oncokbdata[level].append('+'.join(drugnames))
if annotation['diagnosticImplications'] is not None:
getimplications(oncokbdata, 'dx', dxLevels, annotation['diagnosticImplications'])
if annotation['prognosticImplications'] is not None:
getimplications(oncokbdata, 'px', pxLevels, annotation['prognosticImplications'])
oncokbdata['highestDiagnosticImplicationLevel'] = annotation['highestDiagnosticImplicationLevel']
oncokbdata['highestPrognosticImplicationLevel'] = annotation['highestPrognosticImplicationLevel']
except Exception:
log.error("error when processing %s " % annotation)
# sys.exit()
ret = []
if annotate_hotspot:
if annotation['hotspot']:
ret.append('Y')
else:
ret.append('')
_3dhotspot = pull3dhotspots(annotation['query']['hugoSymbol'], annotation['query']['consequence'], annotation['query']['proteinStart'], annotation['query']['proteinEnd'])
ret.append(_3dhotspot)
ret.append(oncokbdata[GENE_IN_ONCOKB_HEADER])
ret.append(oncokbdata[VARIANT_IN_ONCOKB_HEADER])
ret.append(oncokbdata['mutation_effect'])
ret.append(';'.join(oncokbdata['mutation_effect_citations']))
ret.append(oncokbdata['oncogenic'])
for l in levels:
ret.append(','.join(oncokbdata[l]))
ret.append(gethighestsensitivitylevel(oncokbdata))
ret.append(';'.join(oncokbdata['tx_citations']))
for l in dxLevels:
ret.append(','.join(oncokbdata[l]))
ret.append(gethighestDxPxlevel(dxLevels, [oncokbdata['highestDiagnosticImplicationLevel']]))
ret.append(';'.join(oncokbdata['dx_citations']))
for l in pxLevels:
ret.append(','.join(oncokbdata[l]))
ret.append(gethighestDxPxlevel(pxLevels, [oncokbdata['highestPrognosticImplicationLevel']]))
ret.append(';'.join(oncokbdata['px_citations']))
return ret
def gethighestsensitivitylevel(oncokbdata):
r1 = set()
if "LEVEL_R1" in oncokbdata:
r1 = set(oncokbdata["LEVEL_R1"])
for l in levels:
if l.startswith("LEVEL_R") or l not in oncokbdata or oncokbdata[l] == '':
continue
if not r1.issuperset(set(oncokbdata[l])):
return l
return ""
def gethighestDxPxlevel(levels, oncokbdata):
for l in levels:
if l not in oncokbdata:
continue
return l
return ""
def gettreatments(evidence):
treatments = []
for t in evidence['treatments']:
drugs = []
for d in t['drugs']:
drugs.append(d['drugName'])
treatments.append('+'.join(drugs))
return treatments
def readCancerTypes(clinicalFile, data):
with open(clinicalFile, 'rU') as infile:
reader = csv.reader(infile, delimiter='\t')
headers = readheaders(reader)
iSample = geIndexOfHeader(headers, ['SAMPLE_ID'])
iCancerType = geIndexOfHeader(headers, ['ONCOTREE_CODE', 'CANCER_TYPE'])
for row in reader:
data[row[iSample]] = row[iCancerType]
return data
def readheaders(reader):
headers = {}
headers["length"] = 0
for row in reader:
if not row[0].startswith("#"):
headers["^-$"] = '\t'.join(row) # the whole line
headers["length"] = len(row)
i = 0
for h in row:
h=h.strip()
headers[h.upper()] = i
headers[h] = i
i = i + 1
break
return headers
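# Illustrative note (assumed input): for a header line "Hugo_Symbol<TAB>Tumor_Sample_Barcode",
# readheaders returns {"length": 2, "^-$": <the whole line>, "HUGO_SYMBOL": 0, "Hugo_Symbol": 0,
# "TUMOR_SAMPLE_BARCODE": 1, "Tumor_Sample_Barcode": 1}; geIndexOfHeader(headers, HUGO_HEADERS)
# then resolves the column index, or -1 when none of the keyword candidates is present.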
def padrow(row, n):
nr = len(row)
if nr == n:
return row
if nr < n:
return row + [""] * (n - len(row))
else: # nr > n
return row[0:n]
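# Quick sketch of padrow behaviour (hypothetical values):
# padrow(["a"], 3) -> ["a", "", ""] (short rows are padded with empty strings)
# padrow(["a", "b", "c", "d"], 3) -> ["a", "b", "c"] (long rows are truncated)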
```
#### File: bmi-214-final-project/src/load_cBioPortal_data.py
```python
import sys
from typing import Optional
import pandas as pd
from bravado.client import SwaggerClient
import api_tools as apit
import access_cBioPortal as cbio
def load_mutations(
client: SwaggerClient,
*,
save_path: str,
gene_path: Optional[str] = None
):
"""Load patient mutation data from cBioPortal.
Parameters
----------
client : SwaggerClient
Swagger API client accessing cBioPortal
save_path : str
Path at which to save table of mutation data
gene_path : Optional[str]
Path to TSV file containing gene set (name in column 0, entrez gene ID
in column 1) to which mutation data will be filtered (default = None);
if None, mutation data will not be filtered and all variants will be
maintained
"""
if gene_path is not None:
risk_genes = pd.read_csv(gene_path, sep="\t", header=0, usecols=[0, 1])
gene_list = risk_genes.iloc[:, 1].to_numpy().astype(int)
else:
gene_list = None
cbio.get_all_from_bioportal_endpoint(
client, "Mutations", out_path=save_path, gene_list=gene_list
)
def main(save_path: str, gene_path: Optional[str] = None):
"""Save patient mutation data from cBioPortal.
Parameters
----------
save_path : str
Path at which to save table of mutation data
gene_path : Optional[str]
Path to TSV file containing gene set (name in column 0, entrez gene ID
in column 1) to which mutation data will be filtered (default = None);
if None, mutation data will not be filtered and all variants will be
maintained
"""
cBioPortal_api_url = "https://www.cbioportal.org/api/api-docs"
client = apit.get_swagger_api_client(url=cBioPortal_api_url)
load_mutations(client=client, save_path=save_path, gene_path=gene_path)
if __name__ == "__main__":
sys_args = sys.argv
if len(sys_args) == 2:
main(save_path=sys_args[1])
elif len(sys_args) == 3:
main(save_path=sys_args[1], gene_path=sys_args[2])
else:
raise ValueError(
"Please specify an output path at which to save mutation data. "
"Optionally, you may also specify a path to a gene list to include."
)
```
#### File: src/oncokb_annotator/FusionAnnotator.py
```python
import argparse
from src.oncokb_annotator.AnnotatorCore import *
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('FusionAnnotator')
def main(argv):
if argv.help:
log.info('\n'
'FusionAnnotator.py -i <input Fusion file> -o <output Fusion file> [-p previous results] [-c <input clinical file>] [-s sample list filter] [-t <default tumor type>] [-u <oncokb api url>] [-b <oncokb api bearer token>] [-r <structural variant name format, default: [A-Za-z\d]+-[A-Za-z\d]+>]\n'
' Essential Fusion columns (case insensitive):\n'
' HUGO_SYMBOL: Hugo gene symbol\n'
' VARIANT_CLASSIFICATION: Translational effect of variant allele\n'
' TUMOR_SAMPLE_BARCODE: sample ID\n'
' FUSION: amino acid change, e.g. "TMPRSS2-ERG"\n'
' Essential clinical columns:\n'
' SAMPLE_ID: sample ID\n'
' ONCOTREE_CODE: tumor type code from oncotree (oncotree.mskcc.org)\n'
' Cancer type will be assigned based on the following priority:\n'
' 1) ONCOTREE_CODE in clinical data file\n'
' 2) ONCOTREE_CODE exist in Fusion\n'
' 3) default tumor type (-t)\n'
' Default OncoKB base url is https://www.oncokb.org')
sys.exit()
if argv.input_file == '' or argv.output_file == '' or argv.oncokb_api_bearer_token == '':
log.info('for help: python FusionAnnotator.py -h')
sys.exit(2)
if argv.sample_ids_filter:
setsampleidsfileterfile(argv.sample_ids_filter)
if argv.cancer_hotspots_base_url:
setcancerhotspotsbaseurl(argv.cancer_hotspots_base_url)
if argv.oncokb_api_url:
setoncokbbaseurl(argv.oncokb_api_url)
setoncokbapitoken(argv.oncokb_api_bearer_token)
cancertypemap = {}
if argv.input_clinical_file:
readCancerTypes(argv.input_clinical_file, cancertypemap)
log.info('annotating %s ...' % argv.input_file)
processsv(argv.input_file, argv.output_file, argv.previous_result_file, argv.default_cancer_type,
cancertypemap, argv.structural_variant_name_format)
log.info('done!')
if __name__ == "__main__":
parser = argparse.ArgumentParser(add_help=False)
# ArgumentParser doesn't accept "store_true" and "type=" at the same time.
parser.add_argument('-h', dest='help', action="store_true", default=False)
parser.add_argument('-i', dest='input_file', default='', type=str)
parser.add_argument('-o', dest='output_file', default='', type=str)
parser.add_argument('-p', dest='previous_result_file', default='', type=str)
parser.add_argument('-c', dest='input_clinical_file', default='', type=str)
parser.add_argument('-s', dest='sample_ids_filter', default=None, type=str)
parser.add_argument('-t', dest='default_cancer_type', default='', type=str)
parser.add_argument('-u', dest='oncokb_api_url', default='', type=str)
parser.add_argument('-v', dest='cancer_hotspots_base_url', default='', type=str)
parser.add_argument('-b', dest='oncokb_api_bearer_token', default='', type=str)
parser.add_argument('-r', dest='structural_variant_name_format', default=None, type=str)
parser.set_defaults(func=main)
args = parser.parse_args()
args.func(args)
```
#### File: src/oncokb_annotator/OncoKBPlots.py
```python
import argparse
from AnnotatorCore import *
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('OncoKBPlots')
import matplotlib.pyplot as plt
def main(argv):
params = {
"catogerycolumn": argv.catogery_column, # -c
"thresholdcat": argv.threshold_cat, # -n
}
if argv.help:
log.info('\n'
'OncoKBPlots.py -i <annotated clinical file> -o <output PDF file> [-c <categorization column, '
'e.g. CANCER_TYPE>] [-s sample list filter] [-n threshold of # samples in a category] [-l comma separated levels to include]\n'
' Essential clinical columns:\n'
' SAMPLE_ID: sample ID\n'
' HIGHEST_LEVEL: Highest OncoKB levels\n'
' Supported levels (-l): \n'
' LEVEL_1,LEVEL_2,LEVEL_3A,LEVEL_3B,LEVEL_4,ONCOGENIC,VUS')
sys.exit()
if argv.input_file == '' or argv.output_file == '':
log.info('for help: python OncoKBPlots.py -h')
sys.exit(2)
if argv.sample_ids_filter:
setsampleidsfileterfile(argv.sample_ids_filter)
if argv.levels:
params["levels"] = re.split(',', argv.levels)
log.info('annotating %s ...' % argv.input_file)
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
plotclinicalactionability(ax1, argv.input_file, argv.output_file, params)
# ax.yaxis.grid(linestyle="dotted", color="lightgray") # horizontal lines
# plt.margins(0.01)
plotimplications(ax2, 'HIGHEST_DX_LEVEL', 'OncoKB Diagnostic Implications', dxLevels, argv.input_file, argv.output_file, params)
plotimplications(ax3, 'HIGHEST_PX_LEVEL', 'OncoKB Prognostic Implications', pxLevels, argv.input_file, argv.output_file, params)
plt.subplots_adjust(left=0.2, bottom=0.3)
plt.gcf().text(0.90, 0.1, "Generated by OncoKB\n[Chakravarty et al., JCO PO 2017]", fontsize=6,
horizontalalignment='right', verticalalignment='bottom')
fig.tight_layout()
fig.savefig(argv.output_file, bbox_inches='tight')
log.info('done!')
if __name__ == "__main__":
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-h', dest='help', action="store_true", default=False)
parser.add_argument('-i', dest='input_file', default='', type=str)
parser.add_argument('-o', dest='output_file', default='', type=str)
parser.add_argument('-c', dest='catogery_column', default='CANCER_TYPE', type=str)
parser.add_argument('-s', dest='sample_ids_filter', default='', type=str)
parser.add_argument('-n', dest='threshold_cat', default=0, type=int)
parser.add_argument('-l', dest='levels', default='', type=str)
parser.set_defaults(func=main)
args = parser.parse_args()
args.func(args)
``` |
{
"source": "jmickela/stalkexchange",
"score": 2
} |
#### File: stalkexchange/produce/models.py
```python
from django.db import models
from django.utils.translation import ugettext as _
from django.conf import settings
class ProduceType(models.Model):
title = models.CharField(_('Type'), max_length=255)
description = models.TextField(_('Description'))
image = models.ImageField(_('Image'), blank=True)
def __unicode__(self):
return self.title
class GardenItem(models.Model):
QUANTITY_LITTLE = 0
QUANTITY_LOT = 10
quantity_choices = (
(QUANTITY_LITTLE, _("A little")),
(QUANTITY_LOT, _("A lot")),
)
SIZE_BIG = 0
SIZE_MEDIUM = 1
SIZE_SMALL = 2
size_choices = (
(SIZE_BIG, _('Big')),
(SIZE_MEDIUM, _('Medium')),
(SIZE_SMALL, _('Small'))
)
owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="garden_items")
produce = models.ForeignKey(ProduceType)
quantity = models.IntegerField(_('Quantity'), help_text=_('How many do you have?'), choices=quantity_choices)
is_organic = models.BooleanField(_('Is Organic?'), default=False)
size = models.IntegerField(_('Size'), choices=size_choices, help_text=_('How big is this item?'))
description = models.TextField(_('Description'), help_text=_('Extra information...'), blank=True)
image = models.ImageField(_('Image'), blank=True)
```
#### File: stalkexchange/wishlist/views.py
```python
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from .models import WishlistItem
from .forms import WishlistForm
@login_required
def wishlist_add_item(request):
if request.method == "POST":
form = WishlistForm(request.POST)
if form.is_valid():
item = form.save(commit=False)
item.owner = request.user
if WishlistItem.objects.filter(owner=request.user, produce=item.produce).count() > 0:
return redirect("home")
item.save()
return redirect("home")
else:
form = WishlistForm()
return render(request, "wishlist_add_item.html", {'form': form})
# @login_required
# def wishlist_edit_item(request, item_id):
# item = WishlistItem.objects.get(item_id)
# form = None
#
# if request.user.pk != item.owner.pk:
# return redirect("home")
#
# if request.method == "POST":
# form = WishlistForm(request.POST, instance=item)
# if form.is_valid():
# form.save()
# return redirect("home")
# else:
# form = WishlistForm(instance=item)
# return render(request, "wishlist_edit_item.html", {'form': form})
@login_required
def remove_wishlist_item(request, item_id=None):
if item_id is None:
return redirect("home")
item = WishlistItem.objects.get(pk=item_id)
if request.user.pk != item.owner.pk:
return redirect("home")
if request.method == "POST":
item.delete()
return redirect("home")
return render(request, "wishlist_remove_confirm.html")
``` |
{
"source": "jmickela/stone",
"score": 2
} |
#### File: stone/app/tests.py
```python
from needle.cases import NeedleTestCase
from needle.driver import NeedlePhantomJS
class SomeTest(NeedleTestCase):
engine_class = 'needle.engines.perceptualdiff_engine.Engine'
@classmethod
def get_web_driver(cls):
return NeedlePhantomJS()
def test_image(self):
self.set_viewport_size(width=2000, height=768)
self.driver.get('http://ncyl-dev.rootid.in/')
self.assertScreenshot('.home-content', 'google-logo')
``` |
{
"source": "JMIdeaMaker/django-magicauth",
"score": 2
} |
#### File: django-magicauth/magicauth/forms.py
```python
from django import forms
from django.contrib.auth import get_user_model
from django.utils.module_loading import import_string
from django.core.validators import RegexValidator
from django.core.exceptions import ValidationError
from django_otp import user_has_device, devices_for_user
from magicauth import settings as magicauth_settings
from magicauth.models import MagicToken
email_unknown_callback = import_string(magicauth_settings.EMAIL_UNKNOWN_CALLBACK)
class EmailForm(forms.Form):
email = forms.EmailField()
def clean_email(self):
user_email = self.cleaned_data["email"]
user_email = user_email.lower()
email_field = magicauth_settings.EMAIL_FIELD
field_lookup = {f"{email_field}__iexact": user_email}
if not get_user_model().objects.filter(**field_lookup).exists():
email_unknown_callback(user_email)
return user_email
class OTPForm(forms.Form):
OTP_NUM_DIGITS = magicauth_settings.OTP_NUM_DIGITS
otp_token = forms.CharField(
max_length=OTP_NUM_DIGITS,
min_length=OTP_NUM_DIGITS,
validators=[RegexValidator(r"^\d{6}$")],
label=f"Entrez le code à {OTP_NUM_DIGITS} chiffres généré par votre téléphone ou votre carte OTP",
widget=forms.TextInput(attrs={"autocomplete": "off"}),
)
def __init__(self, user, *args, **kwargs):
super(OTPForm, self).__init__(*args, **kwargs)
self.user = user
def clean_otp_token(self):
otp_token = self.cleaned_data["otp_token"]
user = self.user
if not user_has_device(user):
raise ValidationError("Le système n'a pas trouvé d'appareil (carte OTP ou générateur sur téléphone) pour votre compte. Contactez le support pour en ajouter un.")
for device in devices_for_user(user):
if device.verify_is_allowed() and device.verify_token(otp_token):
return otp_token
raise ValidationError("Ce code n'est pas valide.")
```
#### File: django-magicauth/magicauth/send_token.py
```python
import math
from django.contrib.auth import get_user_model
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.template import loader
from magicauth import settings as magicauth_settings
from django.conf import settings as django_settings
from magicauth.models import MagicToken
import sendgrid
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
sg = sendgrid.SendGridAPIClient(django_settings.SENDGRID_API_KEY)
class SendTokenMixin(object):
"""
Helper for sending an email containing a link containing the MagicToken.
"""
def create_token(self, user):
token = MagicToken.objects.create(user=user)
return token
def get_user_from_email(self, user_email):
"""
Query the DB for the user corresponding to the email.
- We use get_user_model() instead of User (in case the Django app has customised the User
class)
- We use magicauth_settings.EMAIL_FIELD, which is the name of the field in the user
model. By default "username" but not always.
"""
user_class = get_user_model()
email_field = magicauth_settings.EMAIL_FIELD
field_lookup = {f"{email_field}__iexact": user_email}
user = user_class.objects.get(**field_lookup)
return user
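# Hedged illustration (assumed configuration, not from the original source): with
# magicauth_settings.EMAIL_FIELD set to "email", field_lookup becomes
# {"email__iexact": "jane@example.org"}, so the lookup matches the stored address
# case-insensitively.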
def send_email(self, user, user_email, token, extra_context=None):
email_subject = magicauth_settings.EMAIL_SUBJECT
html_template = magicauth_settings.EMAIL_HTML_TEMPLATE
text_template = magicauth_settings.EMAIL_TEXT_TEMPLATE
from_email = magicauth_settings.FROM_EMAIL
context = {
"token": token,
"user": user,
"site": get_current_site(self.request),
"TOKEN_DURATION_MINUTES": math.floor(magicauth_settings.TOKEN_DURATION_SECONDS / 60),
"TOKEN_DURATION_SECONDS": magicauth_settings.TOKEN_DURATION_SECONDS,
}
if extra_context:
context.update(extra_context)
text_message = loader.render_to_string(text_template, context)
html_message = loader.render_to_string(html_template, context)
mail = Mail(
from_email=(
django_settings.MAGICAUTH_FROM_EMAIL,
django_settings.MAGICAUTH_SENDER
),
to_emails=[user_email],
subject=email_subject,
html_content=html_message
)
sg.send(mail)
def send_token(self, user_email, extra_context=None):
user = self.get_user_from_email(user_email)
token = self.create_token(user)
self.send_email(user, user_email, token, extra_context)
``` |
{
"source": "jmidyet/python-docx",
"score": 2
} |
#### File: docx/oxml/section.py
```python
from __future__ import absolute_import, print_function
from copy import deepcopy
from ..enum.header import WD_HEADER_FOOTER
from ..enum.section import WD_ORIENTATION, WD_SECTION_START
from .simpletypes import (
ST_RelationshipId, ST_SignedTwipsMeasure, ST_TwipsMeasure
)
from .xmlchemy import (
BaseOxmlElement, OptionalAttribute, RequiredAttribute, ZeroOrMore,
ZeroOrOne
)
class CT_HdrFtrRef(BaseOxmlElement):
"""
`w:headerReference` and `w:footerReference` elements, specifying the
various headers and footers for a section.
"""
rId = RequiredAttribute('r:id', ST_RelationshipId)
class CT_PageMar(BaseOxmlElement):
"""
``<w:pgMar>`` element, defining page margins.
"""
top = OptionalAttribute('w:top', ST_SignedTwipsMeasure)
right = OptionalAttribute('w:right', ST_TwipsMeasure)
bottom = OptionalAttribute('w:bottom', ST_SignedTwipsMeasure)
left = OptionalAttribute('w:left', ST_TwipsMeasure)
header = OptionalAttribute('w:header', ST_TwipsMeasure)
footer = OptionalAttribute('w:footer', ST_TwipsMeasure)
gutter = OptionalAttribute('w:gutter', ST_TwipsMeasure)
class CT_PageSz(BaseOxmlElement):
"""
``<w:pgSz>`` element, defining page dimensions and orientation.
"""
w = OptionalAttribute('w:w', ST_TwipsMeasure)
h = OptionalAttribute('w:h', ST_TwipsMeasure)
orient = OptionalAttribute(
'w:orient', WD_ORIENTATION, default=WD_ORIENTATION.PORTRAIT
)
class CT_SectPr(BaseOxmlElement):
"""
``<w:sectPr>`` element, the container element for section properties.
"""
_tag_seq = (
'w:headerReference', 'w:footerReference', 'w:footnotePr',
'w:endnotePr', 'w:type', 'w:pgSz', 'w:pgMar', 'w:paperSrc',
'w:pgBorders', 'w:lnNumType', 'w:pgNumType', 'w:cols', 'w:formProt',
'w:vAlign', 'w:noEndnote', 'w:titlePg', 'w:textDirection', 'w:bidi',
'w:rtlGutter', 'w:docGrid', 'w:printerSettings', 'w:sectPrChange',
)
headerReference = ZeroOrMore('w:headerReference', successors=_tag_seq[1:])
type = ZeroOrOne('w:type', successors=_tag_seq[5:])
pgSz = ZeroOrOne('w:pgSz', successors=_tag_seq[6:])
pgMar = ZeroOrOne('w:pgMar', successors=_tag_seq[7:])
del _tag_seq
@property
def bottom_margin(self):
"""
The value of the ``w:bottom`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.bottom
@bottom_margin.setter
def bottom_margin(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.bottom = value
def clone(self):
"""
Return an exact duplicate of this ``<w:sectPr>`` element tree
suitable for use in adding a section break. All rsid* attributes are
removed from the root ``<w:sectPr>`` element.
"""
clone_sectPr = deepcopy(self)
clone_sectPr.attrib.clear()
return clone_sectPr
@property
def footer(self):
"""
The value of the ``w:footer`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.footer
@footer.setter
def footer(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.footer = value
def get_headerReference_of_type(self, type_member):
"""
Return the `w:headerReference` child having type attribute value
associated with *type_member*, or |None| if not present.
"""
type_str = WD_HEADER_FOOTER.to_xml(type_member)
matches = self.xpath('w:headerReference[@w:type="%s"]' % type_str)
if matches:
return matches[0]
return None
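# Illustrative note (assuming WD_HEADER_FOOTER.PRIMARY serializes to the 'default'
# type string): for a sectPr containing <w:headerReference w:type="default" r:id="rId3"/>,
# get_headerReference_of_type(WD_HEADER_FOOTER.PRIMARY) returns that element; with no
# matching reference it returns None.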
@property
def gutter(self):
"""
The value of the ``w:gutter`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.gutter
@gutter.setter
def gutter(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.gutter = value
@property
def header(self):
"""
The value of the ``w:header`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.header
@header.setter
def header(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.header = value
@property
def left_margin(self):
"""
The value of the ``w:left`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.left
@left_margin.setter
def left_margin(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.left = value
@property
def right_margin(self):
"""
The value of the ``w:right`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.right
@right_margin.setter
def right_margin(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.right = value
@property
def orientation(self):
"""
The member of the ``WD_ORIENTATION`` enumeration corresponding to the
value of the ``orient`` attribute of the ``<w:pgSz>`` child element,
or ``WD_ORIENTATION.PORTRAIT`` if not present.
"""
pgSz = self.pgSz
if pgSz is None:
return WD_ORIENTATION.PORTRAIT
return pgSz.orient
@orientation.setter
def orientation(self, value):
pgSz = self.get_or_add_pgSz()
pgSz.orient = value
@property
def page_height(self):
"""
Value in EMU of the ``h`` attribute of the ``<w:pgSz>`` child
element, or |None| if not present.
"""
pgSz = self.pgSz
if pgSz is None:
return None
return pgSz.h
@page_height.setter
def page_height(self, value):
pgSz = self.get_or_add_pgSz()
pgSz.h = value
@property
def page_width(self):
"""
Value in EMU of the ``w`` attribute of the ``<w:pgSz>`` child
element, or |None| if not present.
"""
pgSz = self.pgSz
if pgSz is None:
return None
return pgSz.w
@page_width.setter
def page_width(self, value):
pgSz = self.get_or_add_pgSz()
pgSz.w = value
@property
def start_type(self):
"""
The member of the ``WD_SECTION_START`` enumeration corresponding to
the value of the ``val`` attribute of the ``<w:type>`` child element,
or ``WD_SECTION_START.NEW_PAGE`` if not present.
"""
type = self.type
if type is None or type.val is None:
return WD_SECTION_START.NEW_PAGE
return type.val
@start_type.setter
def start_type(self, value):
if value is None or value is WD_SECTION_START.NEW_PAGE:
self._remove_type()
return
type = self.get_or_add_type()
type.val = value
@property
def top_margin(self):
"""
The value of the ``w:top`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.top
@top_margin.setter
def top_margin(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.top = value
class CT_SectType(BaseOxmlElement):
"""
``<w:sectType>`` element, defining the section start type.
"""
val = OptionalAttribute('w:val', WD_SECTION_START)
```
#### File: docx/parts/header.py
```python
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from ..header import HeaderFooterBody
from ..opc.part import XmlPart
class HeaderPart(XmlPart):
@property
def body(self):
"""
A |HeaderFooterBody| proxy object for the `w:hdr` element in this part,
"""
# TODO write CT_HeaderFooter
# element = CT_HeaderFooter(self.element)
# how to access parent here? is it necessary?
return HeaderFooterBody(self.element, None)
```
#### File: python-docx/tests/test_section.py
```python
from __future__ import absolute_import, print_function, unicode_literals
import pytest
from docx.enum.header import WD_HEADER_FOOTER
from docx.enum.section import WD_ORIENT, WD_SECTION
from docx.header import Header
from docx.section import Section, Sections
from docx.shared import Inches
from .unitutil.cxml import element, xml
from .unitutil.mock import class_mock, instance_mock
class DescribeSections(object):
def it_knows_how_many_sections_it_contains(self, len_fixture):
sections, expected_len = len_fixture
assert len(sections) == expected_len
def it_can_iterate_over_its_Section_instances(self, iter_fixture):
sections, expected_count = iter_fixture
section_count = 0
for section in sections:
section_count += 1
assert isinstance(section, Section)
assert section_count == expected_count
def it_can_access_its_Section_instances_by_index(self, index_fixture):
sections, indicies = index_fixture
assert len(sections[0:2]) == 2
for index in indicies:
assert isinstance(sections[index], Section)
# fixtures -------------------------------------------------------
@pytest.fixture
def index_fixture(self, document_elm):
sections = Sections(document_elm, None)
return sections, [0, 1]
@pytest.fixture
def iter_fixture(self, document_elm):
sections = Sections(document_elm, None)
return sections, 2
@pytest.fixture
def len_fixture(self, document_elm):
sections = Sections(document_elm, None)
return sections, 2
# fixture components ---------------------------------------------
@pytest.fixture
def document_elm(self):
return element('w:document/w:body/(w:p/w:pPr/w:sectPr, w:sectPr)')
class DescribeSection(object):
def it_knows_its_start_type(self, start_type_get_fixture):
section, expected_start_type = start_type_get_fixture
assert section.start_type is expected_start_type
def it_can_change_its_start_type(self, start_type_set_fixture):
section, new_start_type, expected_xml = start_type_set_fixture
section.start_type = new_start_type
assert section._sectPr.xml == expected_xml
def it_knows_its_page_width(self, page_width_get_fixture):
section, expected_page_width = page_width_get_fixture
assert section.page_width == expected_page_width
def it_can_change_its_page_width(self, page_width_set_fixture):
section, new_page_width, expected_xml = page_width_set_fixture
section.page_width = new_page_width
assert section._sectPr.xml == expected_xml
def it_knows_its_page_height(self, page_height_get_fixture):
section, expected_page_height = page_height_get_fixture
assert section.page_height == expected_page_height
def it_can_change_its_page_height(self, page_height_set_fixture):
section, new_page_height, expected_xml = page_height_set_fixture
section.page_height = new_page_height
assert section._sectPr.xml == expected_xml
def it_knows_its_page_orientation(self, orientation_get_fixture):
section, expected_orientation = orientation_get_fixture
assert section.orientation is expected_orientation
def it_can_change_its_orientation(self, orientation_set_fixture):
section, new_orientation, expected_xml = orientation_set_fixture
section.orientation = new_orientation
assert section._sectPr.xml == expected_xml
def it_knows_its_page_margins(self, margins_get_fixture):
section, margin_prop_name, expected_value = margins_get_fixture
value = getattr(section, margin_prop_name)
assert value == expected_value
def it_can_change_its_page_margins(self, margins_set_fixture):
section, margin_prop_name, new_value, expected_xml = (
margins_set_fixture
)
setattr(section, margin_prop_name, new_value)
assert section._sectPr.xml == expected_xml
def it_provides_access_to_its_header(self, header_fixture):
section, Header_, sectPr, header_ = header_fixture
header = section.header
Header_.assert_called_once_with(
sectPr, section, WD_HEADER_FOOTER.PRIMARY
)
assert header is header_
# fixtures -------------------------------------------------------
@pytest.fixture
def header_fixture(self, Header_, header_):
sectPr = element('w:sectPr')
section = Section(sectPr, None)
return section, Header_, sectPr, header_
@pytest.fixture(params=[
('w:sectPr/w:pgMar{w:left=120}', 'left_margin', 76200),
('w:sectPr/w:pgMar{w:right=240}', 'right_margin', 152400),
('w:sectPr/w:pgMar{w:top=-360}', 'top_margin', -228600),
('w:sectPr/w:pgMar{w:bottom=480}', 'bottom_margin', 304800),
('w:sectPr/w:pgMar{w:gutter=600}', 'gutter', 381000),
('w:sectPr/w:pgMar{w:header=720}', 'header_distance', 457200),
('w:sectPr/w:pgMar{w:footer=840}', 'footer_distance', 533400),
('w:sectPr/w:pgMar', 'left_margin', None),
('w:sectPr', 'top_margin', None),
])
def margins_get_fixture(self, request):
sectPr_cxml, margin_prop_name, expected_value = request.param
section = Section(element(sectPr_cxml), None)
return section, margin_prop_name, expected_value
@pytest.fixture(params=[
('w:sectPr', 'left_margin', Inches(1),
'w:sectPr/w:pgMar{w:left=1440}'),
('w:sectPr', 'right_margin', Inches(0.5),
'w:sectPr/w:pgMar{w:right=720}'),
('w:sectPr', 'top_margin', Inches(-0.25),
'w:sectPr/w:pgMar{w:top=-360}'),
('w:sectPr', 'bottom_margin', Inches(0.75),
'w:sectPr/w:pgMar{w:bottom=1080}'),
('w:sectPr', 'gutter', Inches(0.25),
'w:sectPr/w:pgMar{w:gutter=360}'),
('w:sectPr', 'header_distance', Inches(1.25),
'w:sectPr/w:pgMar{w:header=1800}'),
('w:sectPr', 'footer_distance', Inches(1.35),
'w:sectPr/w:pgMar{w:footer=1944}'),
('w:sectPr', 'left_margin', None, 'w:sectPr/w:pgMar'),
('w:sectPr/w:pgMar{w:top=-360}', 'top_margin', Inches(0.6),
'w:sectPr/w:pgMar{w:top=864}'),
])
def margins_set_fixture(self, request):
sectPr_cxml, property_name, new_value, expected_cxml = request.param
section = Section(element(sectPr_cxml), None)
expected_xml = xml(expected_cxml)
return section, property_name, new_value, expected_xml
@pytest.fixture(params=[
('w:sectPr/w:pgSz{w:orient=landscape}', WD_ORIENT.LANDSCAPE),
('w:sectPr/w:pgSz{w:orient=portrait}', WD_ORIENT.PORTRAIT),
('w:sectPr/w:pgSz', WD_ORIENT.PORTRAIT),
('w:sectPr', WD_ORIENT.PORTRAIT),
])
def orientation_get_fixture(self, request):
sectPr_cxml, expected_orientation = request.param
section = Section(element(sectPr_cxml), None)
return section, expected_orientation
@pytest.fixture(params=[
(WD_ORIENT.LANDSCAPE, 'w:sectPr/w:pgSz{w:orient=landscape}'),
(WD_ORIENT.PORTRAIT, 'w:sectPr/w:pgSz'),
(None, 'w:sectPr/w:pgSz'),
])
def orientation_set_fixture(self, request):
new_orientation, expected_cxml = request.param
section = Section(element('w:sectPr'), None)
expected_xml = xml(expected_cxml)
return section, new_orientation, expected_xml
@pytest.fixture(params=[
('w:sectPr/w:pgSz{w:h=2880}', Inches(2)),
('w:sectPr/w:pgSz', None),
('w:sectPr', None),
])
def page_height_get_fixture(self, request):
sectPr_cxml, expected_page_height = request.param
section = Section(element(sectPr_cxml), None)
return section, expected_page_height
@pytest.fixture(params=[
(None, 'w:sectPr/w:pgSz'),
(Inches(2), 'w:sectPr/w:pgSz{w:h=2880}'),
])
def page_height_set_fixture(self, request):
new_page_height, expected_cxml = request.param
section = Section(element('w:sectPr'), None)
expected_xml = xml(expected_cxml)
return section, new_page_height, expected_xml
@pytest.fixture(params=[
('w:sectPr/w:pgSz{w:w=1440}', Inches(1)),
('w:sectPr/w:pgSz', None),
('w:sectPr', None),
])
def page_width_get_fixture(self, request):
sectPr_cxml, expected_page_width = request.param
section = Section(element(sectPr_cxml), None)
return section, expected_page_width
@pytest.fixture(params=[
(None, 'w:sectPr/w:pgSz'),
(Inches(4), 'w:sectPr/w:pgSz{w:w=5760}'),
])
def page_width_set_fixture(self, request):
new_page_width, expected_cxml = request.param
section = Section(element('w:sectPr'), None)
expected_xml = xml(expected_cxml)
return section, new_page_width, expected_xml
@pytest.fixture(params=[
('w:sectPr', WD_SECTION.NEW_PAGE),
('w:sectPr/w:type', WD_SECTION.NEW_PAGE),
('w:sectPr/w:type{w:val=continuous}', WD_SECTION.CONTINUOUS),
('w:sectPr/w:type{w:val=nextPage}', WD_SECTION.NEW_PAGE),
('w:sectPr/w:type{w:val=oddPage}', WD_SECTION.ODD_PAGE),
('w:sectPr/w:type{w:val=evenPage}', WD_SECTION.EVEN_PAGE),
('w:sectPr/w:type{w:val=nextColumn}', WD_SECTION.NEW_COLUMN),
])
def start_type_get_fixture(self, request):
sectPr_cxml, expected_start_type = request.param
section = Section(element(sectPr_cxml), None)
return section, expected_start_type
@pytest.fixture(params=[
('w:sectPr/w:type{w:val=oddPage}', WD_SECTION.EVEN_PAGE,
'w:sectPr/w:type{w:val=evenPage}'),
('w:sectPr/w:type{w:val=nextPage}', None,
'w:sectPr'),
('w:sectPr', None,
'w:sectPr'),
('w:sectPr/w:type{w:val=continuous}', WD_SECTION.NEW_PAGE,
'w:sectPr'),
('w:sectPr/w:type', WD_SECTION.NEW_PAGE,
'w:sectPr'),
('w:sectPr/w:type', WD_SECTION.NEW_COLUMN,
'w:sectPr/w:type{w:val=nextColumn}'),
])
def start_type_set_fixture(self, request):
initial_cxml, new_start_type, expected_cxml = request.param
section = Section(element(initial_cxml), None)
expected_xml = xml(expected_cxml)
return section, new_start_type, expected_xml
# fixture components ---------------------------------------------
@pytest.fixture
def Header_(self, request, header_):
return class_mock(
request, 'docx.section.Header', return_value=header_
)
@pytest.fixture
def header_(self, request):
return instance_mock(request, Header)
``` |
{
"source": "J-mie6/icfp-video-scripts",
"score": 3
} |
#### File: J-mie6/icfp-video-scripts/icfp.py
```python
import os, sys, subprocess, argparse, functools, platform
# My ffmpeg directory, just because
#"F:\Downloads\ffmpeg-20181007-0a41a8b-win64-static\ffmpeg-20181007-0a41a8b-win64-static\bin\ffmpeg.exe"
# Required command for join
#C:/Users/Jamie/AppData/Local/Programs/Python/Python35/python.exe icfp.py -f "F:\Downloads\ffmpeg-20181007-0a41a8b-win64-static\ffmpeg-20181007-0a41a8b-win64-static\bin" -ha join --heads ICFP18Norm\\heads --tails ICFP18Norm\\tails -o ICFP18Norm\\final
rm = "del" if platform.system() == "Windows" else "rm"
sep = "\\" if platform.system() == "Windows" else "/"
drop = 5 if platform.system() == "Windows" else 3
exe = ".exe" if platform.system() == "Windows" else ""
## Script Functionality ##
#TODO - Complete
def normalise(args):
ffmpeg = args.ffmpeg
hwaccel = args.hardware_acceleration
vids = args.videos
output_dir = args.output_dir
if output_dir is None: output_dir = path(vids, "normalized")
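# A possible completion (sketch only, not the author's implementation): loop over the mp4
# files in `vids` and run ffmpeg's loudnorm filter on each, e.g.
#   {ffmpeg} -i {src} -af loudnorm=I=-16:LRA=11:TP=-1.5 {out}
# writing the results into output_dir.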
def join(args):
ffmpeg = args.ffmpeg
hwaccel = args.hardware_acceleration
head_dir = args.heads
tail_dir = args.tails
output_dir = args.output_dir
vids = [v for v in os.listdir(head_dir) if v.endswith("mp4")]
for vid in vids:
concat(path(ffmpeg, "ffmpeg" + exe),
to=path(output_dir, vid),
head=path(head_dir, vid),
tail=path(tail_dir, vid))
def split(args):
ffmpeg = args.ffmpeg
hwaccel = args.hardware_acceleration
heads = args.heads
tails = args.tails
head_length = args.head_length
vid_dir = args.videos
if heads is None: heads = path(vid_dir, "heads")
if tails is None: tails = path(vid_dir, "tails")
head_end = "00:00:{}".format(pad(str(head_length), 2, '0'))
vids = [v for v in os.listdir(vid_dir) if v.endswith("mp4")]
for vid in vids:
length = find_length(path(ffmpeg, "ffprobe" + exe), path(vid_dir, vid)).split(":")
secs = 0
for t in length: secs = secs * 60 + int(t)
secs -= head_length
hours = str(secs // 3600)
mins = str((secs % 3600) // 60)
secs = str(secs % 60)
tail_length = ":".join([pad(hours, 2, '0'), pad(mins, 2, '0'), pad(secs, 2, '0')])
slice_vid(path(ffmpeg, "ffmpeg" + exe), path(vid_dir, vid), "00:00:00", head_end, path(heads, vid))
slice_vid(path(ffmpeg, "ffmpeg" + exe), path(vid_dir, vid), head_end, tail_length, path(tails, vid))
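# add_logo (below) overlays the PNG logo on each video: input 1 is the looped logo image,
# "fade=out:st=3:d=2:alpha=1" fades only its alpha channel out starting at 3 s over 2 s,
# "overlay=10:10" pins it 10 px from the top-left corner, and -shortest stops the output
# when the main video stream ends.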
def add_logo(args):
ffmpeg = args.ffmpeg
hwaccel = args.hardware_acceleration
vid_dir = args.videos
output_dir = args.output_dir
logo = args.logo
cmd = "{ffmpeg} -hwaccel nvdec -i {vid} -framerate 30000/1001 -loop 1 -i {logo} -filter_complex \"[1:v] fade=out:st=3:d=2:alpha=1 [ov]; [0:v][ov] overlay=10:10 [v]\" -map \"[v]\" -map 0:a -c:v h264_nvenc -c:a copy -shortest {out}"
vids = [v for v in os.listdir(vid_dir) if v.endswith("mp4")]
for vid in vids:
execute(cmd.format(ffmpeg=path(ffmpeg, "ffmpeg" + exe), vid=path(vid_dir, vid), out=path(output_dir, vid), logo=logo))
## Helper Functions ##
def path(*parts): return sep.join(parts)
def pad(s, n, d): return (n - len(s)) * d + s
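# e.g. path("clips", "talk.mp4") gives "clips\talk.mp4" on Windows and "clips/talk.mp4"
# elsewhere, and pad("7", 2, "0") left-pads to "07" (used below to build hh:mm:ss strings).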
def execute(cmd):
return str(subprocess.run(cmd, shell=True, stdout=subprocess.PIPE).stdout)[2:-drop]
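# str() of the captured bytes looks like "b'...\r\n'"; the [2:-drop] slice strips the leading
# b' and the escaped trailing newline plus closing quote (5 chars on Windows, 3 elsewhere).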
def concat(ffmpeg, head, tail, to, fast=False):
cmd1 = "{ffmpeg} -hwaccel nvdec -i {head} -c copy -bsf:v h264_mp4toannexb -f mpegts tmp-1.ts"
cmd2 = "{ffmpeg} -hwaccel nvdec -i {tail} -c copy -bsf:v h264_mp4toannexb -f mpegts tmp-2.ts"
cmd3 = "{ffmpeg} -hwaccel nvdec -f mpegts -i \"concat:tmp-1.ts|tmp-2.ts\" -c copy -bsf:a aac_adtstoasc {to}"
cmd4 = "{rm} tmp-1.ts tmp-2.ts".format(rm=rm)
#cmd = "{ffmpeg} -hwaccel nvdec -i {head} -i {tail} -filter_complex \"[0:v:0][0:a:0][1:v:0][1:a:0]concat=n=2:v=1:a=1[outv][outa]\" -map \"[outv]\" -map \"[outa]\" -c:v h264_nvenc {to}"
execute(cmd1.format(ffmpeg=ffmpeg, head=head))
execute(cmd2.format(ffmpeg=ffmpeg, tail=tail))
execute(cmd3.format(ffmpeg=ffmpeg, to=to))
execute(cmd4)
#execute(cmd.format(ffmpeg=ffmpeg, head=head, tail=tail, to=to))
def slice_vid(ffmpeg, vid, start, end, out):
#cmd = ffmpeg + " -i {vid} -vcodec copy -acodec copy -ss {start} -t {duration} {out}"
cmd = ffmpeg + " -hwaccel nvdec -ss {start} -i {vid} -t {duration} -c:v h264_nvenc -c:a aac -strict experimental -b:a 128k {out}"
run = cmd.format(vid=vid, out=out, start=start, duration=end)
execute(run)
def find_length(ffprobe, vid):
cmd = ffprobe + " -v error -show_entries format=duration -sexagesimal -of default=noprint_wrappers=1:nokey=1 {vid}"
run = cmd.format(vid=vid)
return execute(run).split(".")[0]
## Main ##
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="ICFP Video Processing Script")
parser.add_argument("-f",
"--ffmpeg",
help="install directory of ffmpeg",
required=True)
subparsers = parser.add_subparsers(help="help for subcommand")
parser_normalise = subparsers.add_parser("normalize", help="normalizes the audio of every video in a directory")
parser_normalise.add_argument("-v",
"--videos",
help="directory containing videos, defaults to current directory",
default=".")
parser_normalise.add_argument("-o",
"--output-dir",
help="directory to place results, defaults to video directory with new subfolder \"normalized\"",
default=None)
parser_normalise.set_defaults(func=normalise)
parser_split = subparsers.add_parser("split", help="splits every video in a directory into a head and a tail, with the head of a specified length")
parser_split.add_argument("-v",
"--videos",
help="directory containing videos, defaults to current directory",
default=".")
parser_split.add_argument("-hd",
"--heads",
help="directory to place heads, defaults to video directory with new subfolder \"heads\"",
default=None)
parser_split.add_argument("-td",
"--tails",
help="directory to place tails, defaults to video directory with new subfolder \"tails\"",
default=None)
parser_split.add_argument("-l",
"--head-length",
help="how long the prefix split for the videos should be, in seconds (default 10)",
type=int,
default=10)
parser_split.set_defaults(func=split)
parser_join = subparsers.add_parser("join", help="re-merges videos that were previously split into heads and tails")
parser_join.add_argument("-o",
"--output-dir",
help="directory to place results, defaults to a new subfolder \"joined\"",
default="joined")
parser_join.add_argument("-hd",
"--heads",
help="directory containing heads, defaults to directory \"heads\" in current directory",
default="heads")
parser_join.add_argument("-td",
"--tails",
help="directory containing tails, defaults to directory \"tails\" in current directory",
default="tails")
parser_join.set_defaults(func=join)
parser_icon = subparsers.add_parser("add-logos", help="adds the ICFP logo to each video in a directory; the image is scaled automatically")
parser_icon.add_argument("-v",
"--videos",
help="directory containing videos, defaults to current directory",
default=".")
parser_icon.add_argument("-l",
"--logo",
help="this year's ICFP logo in png format",
required=True)
parser_icon.add_argument("-o",
"--output-dir",
help="directory to place results, defaults to a new subfolder \"titled\"",
default="titled")
parser.add_argument("-ha",
"--hardware-acceleration",
help="attempt to perform hardware acceleration where possible",
action="store_true")
parser_icon.set_defaults(func=add_logo)
args = parser.parse_args()
args.func(args)
```
#### File: J-mie6/icfp-video-scripts/rejoin.py
```python
import os, sys, subprocess
ffmpeg = r"F:\Downloads\ffmpeg-20181007-0a41a8b-win64-static\ffmpeg-20181007-0a41a8b-win64-static\bin\ffmpeg.exe"
def concat(s, e, to):
cmd1 = ffmpeg + " -hwaccel nvdec -i %s -c copy -bsf:v h264_mp4toannexb -f mpegts tmp-1.ts"
cmd2 = ffmpeg + " -hwaccel nvdec -i %s -c copy -bsf:v h264_mp4toannexb -f mpegts tmp-2.ts"
cmd3 = ffmpeg + " -hwaccel nvdec -f mpegts -i \"concat:tmp-1.ts|tmp-2.ts\" -c copy -bsf:a aac_adtstoasc %s"
cmd4 = "del tmp-1.ts tmp-2.ts"
os.system(cmd1 % s)
os.system(cmd2 % e)
os.system(cmd3 % to)
os.system(cmd4)
if __name__ == "__main__":
files = [f for f in os.listdir("ICFP18Norm\\titled") if f.endswith("mp4")]
for f in files:
print(f)
concat("ICFP18Norm\\titled\\" + f, "ICFP18Norm\\tails\\" + f, "ICFP18Norm\\final\\" + f)
``` |
{
"source": "jmigual/OiS",
"score": 3
} |
#### File: OiS/Info/distribucions.py
```python
import random
import math
def normal(mu, sigma):
y1 = random.random()
y2 = random.random()
t = math.sqrt(-2*math.log(y1))*math.sin(2*math.pi*y2)
return mu + t*sigma
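# normal() is one branch of the Box-Muller transform: sqrt(-2*ln(U1))*sin(2*pi*U2)
# is standard normal, then shifted and scaled to N(mu, sigma^2).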
def densTriang(a, b, c, x):
if x < c:
return 2*(x - a)/((b - a)*(c - a))
return 2*(b - x)/((b - a)*(b - c))
def triangular(a, b, c):
k = 2/(b - a)
y = random.uniform(0, k)
x = random.uniform(a, b)
if y < densTriang(a, b, c, x):
return x
return triangular(a, b, c)
def bernoulli(p):
return int(random.random() <= p)
def binomial(n, p):
s = 0
for i in range(n):
s += bernoulli(p)
return s
def poisson(l):
s = 1.0
y = random.random()
val = y*math.exp(l)
k = 1
while s < val:
s += math.pow(l, k)/math.factorial(k)
k += 1
return k - 1  # k is incremented once past the accepted index; without the -1 the sample is shifted by one
def exponencial(l):
y = random.random()
return -1.*math.log(y)/l
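# Inverse-transform sampling: for U ~ Uniform(0,1), -ln(U)/l is exponential with rate l
# (mean 1/l), since 1-U has the same distribution as U.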
def erlangk(k, mu):
prod = 1
for i in range(k):
prod *= random.random()
return -mu*math.log(prod)/k
n = 1000
suma = 0
for i in range(n):
aux = erlangk(1, 2.0)
suma += aux
print(aux)
#print("mitjana:", suma/n)
```
#### File: OiS/Treball/logger.py
```python
import logging
def configure_default_logger():
log = logging.getLogger()
log.setLevel(logging.INFO)
formatter = logging.Formatter("[{asctime}] ({levelname}) {message}", style="{")
handler_s = logging.StreamHandler()
handler_f = logging.FileHandler("info.log")
handler_s.setFormatter(formatter)
handler_f.setFormatter(formatter)
log.addHandler(handler_s)
log.addHandler(handler_f)
configure_default_logger()
``` |
{
"source": "jmigual/projecte2",
"score": 3
} |
#### File: projecte2/Code/DirectorMidiFile.py
```python
import time
import mido
from logger import *
# The default tempo is 120 BPM.
# (500000 microseconds per beat (quarter note).)
DEFAULT_TEMPO = 500000
DEFAULT_TICKS_PER_BEAT = 480
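# Worked example: at the default tempo one beat lasts 500000 us = 0.5 s, so a delta of
# 480 ticks converts as mido.tick2second(480, DEFAULT_TICKS_PER_BEAT, DEFAULT_TEMPO) == 0.5.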
def to_abstime(messages, i):
now = 0
for msg in messages:
now += msg.time
yield msg.copy(time=now), i
def to_reltime(messages):
"""Convert messages to relative time."""
now = 0
for msg, track in messages:
delta = msg.time - now
yield msg.copy(time=delta), track
now = msg.time
def fix_end_of_track(messages):
"""Remove all end_of_track messages and add one at the end.
This is used by merge_tracks() and MidiFile.save()."""
# Accumulated delta time from removed end of track messages.
# This is added to the next message.
accum = 0
for msg, track in messages:
if msg.type == 'end_of_track':
accum += msg.time
else:
if accum:
delta = accum + msg.time
yield msg.copy(time=delta), track
accum = 0
else:
yield msg, track
yield mido.MetaMessage('end_of_track', time=accum), 0
def merge_tracks(tracks):
messages = []
for i, track in enumerate(tracks):
messages.extend(to_abstime(track, i))
messages.sort(key=lambda x: x[0].time)
return mido.MidiTrack(fix_end_of_track(to_reltime(messages)))
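# merge_tracks flattens all tracks into one stream ordered by absolute tick time and then
# converts back to delta times; unlike mido's built-in merge, every yielded item is a
# (message, source-track-index) pair so the Director can route notes per track.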
class DirectorMidiFile(mido.MidiFile):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __iter__(self):
if self.type == 2:
raise TypeError("can't merge tracks in type 2 (asynchronous) file")
tempo = DEFAULT_TEMPO
for msg, track in merge_tracks(self.tracks):
# Convert message time from absolute time
# in ticks to relative time in seconds.
if msg.time > 0:
delta = mido.tick2second(msg.time, self.ticks_per_beat, tempo)
else:
delta = 0
yield msg.copy(time=delta), track
if msg.type == 'set_tempo':
tempo = msg.tempo
def play_tracks(self, meta_messages=False):
sleep = time.sleep
for msg, track in self:
if isinstance(msg, mido.MetaMessage) and not meta_messages:
continue
else:
yield msg, track
sleep(msg.time)
```
#### File: projecte2/Code/director.py
```python
import socket
import struct
import json
import argparse
from DirectorMidiFile import *
from logger import *
# 8 music voices
# message = identifier, length of data, 8 notes (one per instrument)
# note = 0 means hold the last note
# note = 255 means silence, stop the last note
dim = 1000
can_frame_fmt = "=IB3x8s"
def build_can_frame(can_id, data):
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(can_frame_fmt, can_id, can_dlc, data)
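# Example with hypothetical note values: build_can_frame(0x01, bytes([60, 0, 255, 0, 0, 0, 0, 0]))
# packs note 60 for voice 0, holds voice 1 and silences voice 2; struct.calcsize(can_frame_fmt)
# is 16, so every frame is 16 bytes. (This helper is not used by the UDP-based Director class below.)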
class Director:
""" First version using multicast Ethernet
"""
def __init__(self):
self.file_name = None
# create a raw socket
print("Init Multicast socket")
self.multicast_group = ('172.16.31.10', 10000)
# Create the datagram socket
self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Set a timeout so the socket does not block indefinitely when trying
# to receive data.
self.s.settimeout(0.2)
# Set the time-to-live for messages to 1 so they do not go past the
# local network segment.
ttl = struct.pack('b', 1)
self.s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
# print(str(self.notes[0]).replace(",", "\n"))
self.mid = None
self.tracks = None
def play(self, file_path):
self.mid = DirectorMidiFile(file_path)
self.tracks = len(self.mid.tracks)
logging.info("Playing...")
msg_out = {}
msg_in = {}
prog_change = {}
control_change = {}
for msg, track in self.mid.play_tracks():
#print(msg, track)
if msg.time > 0:
json_string = json.dumps({
"in": msg_in,
"out": msg_out,
"pc": prog_change,
"cc": control_change,
"tracks": self.tracks
})
logging.debug("data_sent:" + json_string)
sent = self.s.sendto(json_string.encode(), self.multicast_group)
msg_out = {}
msg_in = {}
prog_change = {}
control_change = {}
if msg.type == 'note_on':
messages = msg_in.get(track, [])
messages.append([msg.note, msg.velocity])
msg_in[track] = messages
elif msg.type == 'note_off':
messages = msg_out.get(track, [])
messages.append(msg.note)
msg_out[track] = messages
elif msg.type == 'program_change':
prog_change[track] = msg.program
elif msg.type == 'control_change':
messages = control_change.get(track, [])
messages.append({
"num": msg.control,
"value": msg.value
})
control_change[track] = messages
else:
print(msg)
continue
json_string = json.dumps({
"in": {},
"out": list(range(self.tracks)),
"tracks": self.tracks
})
self.s.sendto(json_string.encode(), self.multicast_group)
logging.info("Play Over")
self.s.close()
def main():
parser = argparse.ArgumentParser(description="Start director to play music with Pi Orchestra")
parser.add_argument("path", help="Path to file to be played")
parser.add_argument("-d", "--debug", action="store_true", help="Print debug information")
args = parser.parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
logging.debug("Starting in DEBUG mode")
dir1 = Director()
dir1.play(args.path)
if __name__ == '__main__':
set_default_logger("director.log")
try:
main()
except KeyboardInterrupt:
logging.info("Shutting down Director, Thanks for the ride!")
```
#### File: projecte2/Code/logger.py
```python
import logging
def set_default_logger(file_name="info.log"):
log = logging.getLogger()
log.setLevel(logging.INFO)
formatter = logging.Formatter("[%(asctime)s] (%(levelname)s) %(message)s")
handler_s = logging.StreamHandler()
handler_f = logging.FileHandler(file_name)
handler_s.setFormatter(formatter)
handler_f.setFormatter(formatter)
log.addHandler(handler_s)
log.addHandler(handler_f)
``` |
{
"source": "jmigual/socialQuiz",
"score": 2
} |
#### File: backend/flaskrun/flaskrun.py
```python
import getopt
import sys
def flask_run(app, host='0.0.0.0', port=5000, threaded=False, debug=False):
options, args = getopt.gnu_getopt(sys.argv, 'dh:p:t',
["debug", "host=", "threaded", "port="])
for o, a in options:
if o in ("--debug", "-d"):
debug = True
elif o in ("-h", "--host"):
host = a
elif o in ("-p", "--port"):
port = a
elif o in ("-t", "--threaded"):
threaded = True
print("Started execution of Social Quiz")
print("Debug: %s" % debug)
print("Host: %s:%s" % (host, port))
print("Threaded: %s" % threaded)
app.run(debug=debug, host=host, port=port, threaded=threaded)
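# Example invocation from a script that calls flask_run(app) (e.g. social_quiz.py below):
#   python3 social_quiz.py --host 0.0.0.0 --port 8080 --debug --threaded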
```
#### File: socialQuiz/backend/social_quiz.py
```python
import json
import os.path
import random
import re
from flask import Flask, send_from_directory
from flask import request, abort
from flaskrun.flaskrun import flask_run
import datab.social_database as db
app = Flask(__name__)
# Regular expression to only accept certain files
fileChecker = re.compile(r"(.*\.js|.*\.html|.*\.png|.*\.css|.*\.map)$")
numberOfAnswers = 4
random.seed(7)
def root_dir(): # pragma: no cover
return os.path.abspath(os.path.dirname(__file__))
@app.route('/')
def root():
return index("index2.html")
@app.route('/<path:filename>')
def index(filename):
if fileChecker.match(filename):
return send_from_directory(os.path.join(root_dir(), 'static'), filename)
abort(403)
@app.route('/register')
def register():
# To obtain the mail
email = request.args.get('email')
print(email)
if email is None:
return json.dumps({})
id_user = db.register_or_get_email(email)
return json.dumps({"id": id_user})
@app.route('/join_room')
def join_room():
room_id = request.args.get('room_id')
email = request.args.get('email')
user_id = db.register_or_get_email(email)
db.exec_query("REPLACE INTO room_members (room_id, user_id) VALUES (%s,%s)", [room_id, user_id])
return json.dumps({"id": user_id})
@app.route('/answered_room')
def answered_room():
room_id = request.args.get('room_id')
user_id = request.args.get('user_id')
values = db.exec_query("SELECT a.id "
"FROM answer a INNER JOIN question q "
"WHERE a.question_id = q.id AND q.room_id = %s AND a.user_id= %s",
[room_id, user_id])
return json.dumps({"answered": len(values) > 0})
@app.route('/get_user_id')
def get_user_id():
email = request.args.get('email')
id_user = db.register_or_get_email(email)
return json.dumps({"id": id_user})
@app.route('/create_room')
def create_room():
user_id = request.args.get('user_id')
room_id = db.exec_query("INSERT INTO room (creator) VALUES (%s)", [user_id])
return json.dumps({"id": room_id})
@app.route('/get_rooms')
def get_rooms():
user_id = request.args.get('user_id')
values = db.exec_query("SELECT r.id, r.status FROM room r WHERE r.creator=%s", [user_id])
response = []
for val in values:
response.append({"id": val[0], "status": val[1]})
return json.dumps(response)
@app.route('/fill_room', methods=['POST'])
def fill_room():
json_data = request.get_json()
if json_data is None:
return json.dumps({"error": "no JSON found"})
else:
room_id = json_data["room_id"]
questions = json_data["question"]
for q in questions:
db.exec_query("INSERT INTO question (room_id, question) VALUES (%s, %s)", [room_id, q])
return json.dumps({"info": "Data received"})
@app.route('/open_room')
def open_room():
room_id = request.args.get('room_id')
print(room_id)
db.exec_query("UPDATE room r SET r.status='started' WHERE r.id = %s", [room_id])
return json.dumps({"info": "The room has been opened successfully", "status": "started"})
@app.route('/close_room')
def close_room():
room_id = request.args.get('room_id')
db.exec_query("UPDATE room r SET r.status='closed' WHERE r.id = %s", [room_id])
return json.dumps({"info": "The room has been closed successfully", "status": "closed"})
@app.route('/finish_room')
def finish_room():
room_id = request.args.get('room_id')
db.exec_query("UPDATE room r SET r.status='finished' WHERE r.id = %s", [room_id])
# for
# SELECT id, COUNT(a.id), COUNT(a.id) FROM Room r INNER JOIN
values = db.exec_query("SELECT u.email , COUNT(qq.id) "
"FROM quiz_question qq "
"INNER JOIN users u ON (qq.asked_user_id = u.id) "
"INNER JOIN room_members rm ON (u.id = rm.user_id) "
"WHERE qq.correct_answer_id = qq.answered_id AND rm.room_id = %s "
"GROUP BY u.email "
"ORDER BY COUNT(qq.id) DESC",
[room_id])
ranking = []
for row in values:
ranking.append({"email": row[0], "correct": row[1]})
return json.dumps({"ranking": ranking})
@app.route('/room_status')
def status_room():
room_id = request.args.get('room_id')
# SELECT status FROM Room WHERE id = 1
values = db.exec_query("SELECT status FROM room WHERE id = %s", [room_id])
return json.dumps({
"status": values[0][0]
})
@app.route('/get_room_questions')
def get_room_question():
room_id = request.args.get('room_id')
values = db.exec_query("SELECT id, question FROM question WHERE room_id = %s", [room_id])
response = []
for val in values:
response.append({"id": val[0], "text": val[1]})
return json.dumps({"questions": response})
@app.route('/post_room_answers', methods=['POST'])
def post_room_answers():
json_data = request.get_json()
if json_data is None:
return json.dumps({"error": "no JSON found"}), 404
user_id = json_data["user_id"]
values = []
for a in json_data["answers"]:
values.append((a["id"], user_id, a["text"]))
print(values[len(values) - 1])
db.exec_many_query("INSERT INTO answer (question_id, user_id, answer) VALUES(%s,%s,%s)", values)
return json.dumps({"info": "Data received"})
@app.route('/get_quiz_question')
def get_question():
room_id = int(request.args.get('room_id'))
user_id = int(request.args.get('user_id'))
possible_questions = db.get_non_answered_questions(room_id, user_id)
possible_users_to_ask = db.get_non_answered_people(room_id, user_id)
question_id = []
asked_about_id = []
if len(possible_questions) > 0:
question_id = random.sample(possible_questions, 1)
else:
possible_questions = db.get_all_questions(room_id)
if len(possible_questions) > 0:
question_id = random.sample(possible_questions, 1)
if len(possible_users_to_ask) > 0:
asked_about_id = random.sample(possible_users_to_ask, 1)
else:
possible_users_to_ask = db.get_all_different_people(room_id, user_id)
if len(possible_questions) > 0:
asked_about_id = random.sample(possible_users_to_ask, 1)
if len(question_id) > 0 and 0 < len(asked_about_id):
quiz_question_id = db.insert_quiz_question(user_id, asked_about_id[0], question_id[0])
other_users = db.get_all_different_people(room_id, asked_about_id[0])
random.shuffle(other_users)
answers = []
(answer_id, text_id) = db.get_answer(question_id[0], asked_about_id[0])
db.exec_query("UPDATE quiz_question SET correct_answer_id=%s WHERE id = %s", [answer_id, quiz_question_id])
answers.append((answer_id, text_id))
if min(numberOfAnswers - 1, len(other_users)) > 0:
for i in range(min(numberOfAnswers - 1, len(other_users))):
(answer_id, text_id) = db.get_answer(question_id[0], other_users[i])
answers.append((answer_id, text_id))
# if commented the first answer will be the correct one
random.shuffle(answers)
answer_json = []
for (answer_id, text_id) in answers:
answer_json.append({"id": answer_id, "text": text_id})
print(quiz_question_id)
# SELECT 'question' FROM 'Question' WHERE 'id' = 3
value = db.exec_query("SELECT id "
"FROM quiz_question "
"WHERE asked_user_id = %s AND about_user_id = %s AND question_id = %s",
[user_id, asked_about_id[0], question_id[0]])
quiz_question_id = value[0][0]
value = db.exec_query("SELECT q.question "
"FROM question q "
"WHERE q.id = %s",
[question_id[0]])
question_text = value[0][0]
value = db.exec_query("SELECT u.email "
"FROM users u "
"WHERE u.id=%s",
[asked_about_id[0]])
user_name = value[0][0]
question_text = "What did %s answer to '%s' ?" % (user_name, question_text)
return json.dumps({
"id": quiz_question_id,
"question": question_text,
"answers": answer_json
})
else:
return json.dumps({"error": "Not available questions for this user in this room"})
@app.route('/post_quiz_answer')
def post_answer():
quiz_question_id = request.args.get('quiz_question_id')
quiz_answer_id = request.args.get('quiz_answer_id')
db.exec_query("UPDATE quiz_question SET answered_id = %s WHERE id = %s", [quiz_answer_id, quiz_question_id])
value = db.exec_query("SELECT qq.answered_id, qq.correct_answer_id, qq.question_id "
"FROM quiz_question qq "
"WHERE qq.id = %s", [quiz_question_id])
answered_id = value[0][0]
correct_answer_id = value[0][1]
question_id = value[0][2]
value = db.exec_query("SELECT a.answer FROM answer a WHERE a.id = %s ", [correct_answer_id])
if len(value) > 0:
text = value[0][0]
else:
text = "something went wrong"
if value is None:
return json.dumps({"error": "Internal server error"})
return json.dumps({
"correct": answered_id == correct_answer_id,
"question": question_id,
"correct_answer": {"id": correct_answer_id, "text": text}
})
if __name__ == '__main__':
flask_run(app)
``` |
{
"source": "jmigueldelgado/buhayra",
"score": 2
} |
#### File: buhayra/buhayra/defAggregations.py
```python
import json
import geojson
from bson import json_util
import os
import sys
import datetime
from buhayra.getpaths import *
import socket
import subprocess
import fiona  # used by getRandomSubset()
from shapely.geometry import Polygon, shape  # used by getRandomSubset()
def getRandomSubset():
bbox = Polygon([[-40.20,-3.0], [-38.3,-3.0], [-38.3,-4.50], [-40.20,-4.50]])
path_to_geojson = ogr_getJRC()
with fiona.open(path_to_geojson,'r') as latest:
ids=list()
for feat in latest:
if not shape(feat['geometry']).within(bbox):
continue
ids.append('{}'.format(feat['properties']['id_jrc']))
path_to_geojson = ogr_getRandomSubset(ids)
def ogr_getJRC():
path_to_geojson = os.path.join(home['home'],'JRC.geojson')
if os.path.isfile(path_to_geojson):
pass
else:
with open(os.path.join(home['home'],'ogr_query.log'), 'a') as o_std, open(os.path.join(home['home'], 'ogr_query.err'), 'a') as o_err:
#query = 'geom from (select distinct on (id_jrc) id_jrc, ingestion_time, area, geom, id from neb order by id_jrc, ingestion_time desc) as subquery using unique id using srid=4326'
query = 'select distinct on (id_jrc) * from jrc_neb order by id_jrc desc'
call=['nohup','ogr2ogr','-f','GeoJSON' ,path_to_geojson, 'PG:host='+postgis_host+' dbname=watermasks user=' +postgis_user+' password='+postgis_pass,'-sql',query]
p = subprocess.Popen(call, stdout=o_std, stderr=o_err, preexec_fn=os.setpgrp)
while p.wait()!=0:
pass
return path_to_geojson
def ogr_getLatestIngestionTime():
path_to_geojson = os.path.join(home['home'],'latest-watermask'+ datetime.datetime.today().strftime('%Y-%m-%d')+'.geojson')
if os.path.isfile(path_to_geojson):
pass
else:
with open(os.path.join(home['home'],'ogr_query.log'), 'a') as o_std, open(os.path.join(home['home'], 'ogr_query.err'), 'a') as o_err:
#query = 'geom from (select distinct on (id_jrc) id_jrc, ingestion_time, area, geom, id from neb order by id_jrc, ingestion_time desc) as subquery using unique id using srid=4326'
query = 'select distinct on (id_jrc) id_jrc, ingestion_time, area, ST_centroid(geom), id from neb order by id_jrc, ingestion_time desc'
call=['nohup','ogr2ogr','-f','GeoJSON' ,path_to_geojson, 'PG:host='+postgis_host+' dbname=watermasks user=' +postgis_user+' password='+postgis_pass,'-sql',query]
p = subprocess.Popen(call, stdout=o_std, stderr=o_err, preexec_fn=os.setpgrp)
while p.wait()!=0:
pass
return path_to_geojson
def ogr_getTimeSeriesID(id):
path_to_geojson = os.path.join(home['home'],'time-series-'+ str(id) +'.geojson')
with open(os.path.join(home['home'],'ogr_query.log'), 'a') as o_std, open(os.path.join(home['home'], 'ogr_query.err'), 'a') as o_err:
#query = 'geom from (select distinct on (id_jrc) id_jrc, ingestion_time, area, geom, id from neb order by id_jrc, ingestion_time desc) as subquery using unique id using srid=4326'
query = ('select id_jrc, ingestion_time, area, geom' +
' from neb'+
' order by id_jrc, ingestion_time desc')
call=['nohup','ogr2ogr','-f','GeoJSON' ,path_to_geojson, 'PG:host='+postgis_host+' dbname=watermasks user=' +postgis_user+' password='+postgis_pass,'-sql',query]
p = subprocess.Popen(call, stdout=o_std, stderr=o_err, preexec_fn=os.setpgrp)
while p.wait()!=0:
pass
return path_to_geojson
def ogr_getRandomSubset(idlist):
path_to_geojson = os.path.join(home['home'],'time-series-'+ datetime.datetime.today().strftime('%Y-%m-%d') +'.geojson')
with open(os.path.join(home['home'],'ogr_query.log'), 'a') as o_std, open(os.path.join(home['home'], 'ogr_query.err'), 'a') as o_err:
#query = 'geom from (select distinct on (id_jrc) id_jrc, ingestion_time, area, geom, id from neb order by id_jrc, ingestion_time desc) as subquery using unique id using srid=4326'
query = ('select neb.id_jrc, neb.ingestion_time, neb.area, neb.wmxjrc_area, neb.geom'+
' from neb'+
' where neb.id_jrc in ('+','.join(idlist)+')' +
' order by id_jrc, ingestion_time desc')
call=['nohup','ogr2ogr','-f','GeoJSON' ,path_to_geojson, 'PG:host='+postgis_host+' dbname=watermasks user=' +postgis_user+' password='+postgis_pass,'-sql',query]
p = subprocess.Popen(call, stdout=o_std, stderr=o_err, preexec_fn=os.setpgrp)
while p.wait()!=0:
pass
return path_to_geojson
def ogr_getAll():
path_to_geojson = os.path.join(home['home'],'time-series-'+ datetime.datetime.today().strftime('%Y-%m-%d') +'.geojson')
with open(os.path.join(home['home'],'ogr_query.log'), 'a') as o_std, open(os.path.join(home['home'], 'ogr_query.err'), 'a') as o_err:
#query = 'geom from (select distinct on (id_jrc) id_jrc, ingestion_time, area, geom, id from neb order by id_jrc, ingestion_time desc) as subquery using unique id using srid=4326'
query = ('select neb.id_jrc, neb.ingestion_time, neb.area, neb.wmxjrc_area, neb.geom'+
' from neb'+
' order by id_jrc, ingestion_time desc')
call=['nohup','ogr2ogr','-f','GeoJSON' ,path_to_geojson, 'PG:host='+postgis_host+' dbname=watermasks user=' +postgis_user+' password='+postgis_pass,'-sql',query]
p = subprocess.Popen(call, stdout=o_std, stderr=o_err, preexec_fn=os.setpgrp)
while p.wait()!=0:
pass
return path_to_geojson
def aggr2geojson(polys):
feats=[]
for poly in polys:
oid=json.loads(json.dumps(poly['_id'],default=json_util.default))
dttm=poly['properties']['ingestion_time']
dttmstr=dttm.strftime("%Y-%m-%d %H:%M:%S")
poly['properties']['ingestion_time']=dttmstr
del poly['_id']
poly['properties']['oid']=oid['$oid']
### rename to insert into postgis
if 'platformname' in poly['properties']:
poly['properties']['source_id'] = poly['properties'].pop('platformname')
if poly['properties']['source_id']=='Sentinel-1':
poly['properties']['source_id']=1
elif poly['properties']['source_id']=='Sentinel-2':
poly['properties']['source_id']=2
elif 'source_id' in poly['properties']:
print('dealing with correct geojson attributes, no need to change anything\n')
else:
print('probably one of the first scenes to be processed, before adding sentinel-2, so it must be sentinel-1! passing 1 as source_id.\n')
poly['properties']['source_id']=1
if poly['geometry'] is None:
feats.append(geojson.Feature(geometry=None,properties=poly['properties']))
else:
## mixing Polygon and MultiPolygon is not accepted by postgis. we will force Polygon into MultiPolygon
if len(poly['geometry']['coordinates'])>0:
mp=geojson.MultiPolygon()
## now we have to correct syntax of MultiPolygon which was forced from Polygon so it generates valid geojson in the end
if poly["geometry"]["type"]=='Polygon':
poly["geometry"]["coordinates"]=[poly["geometry"]["coordinates"]]
#if len(poly['geometry']['coordinates'])==1:
# mp=geojson.Polygon()
mp['coordinates']=poly['geometry']['coordinates']
feats.append(geojson.Feature(geometry=mp,properties=poly['properties']))
feat_col=geojson.FeatureCollection(feats)
return(feat_col)
```
#### File: jmigueldelgado/buhayra/get_past_scenes.py
```python
from sentinelsat import SentinelAPI, read_geojson, geojson_to_wkt
from datetime import date, datetime, timedelta
import sys
import os
from buhayra.getpaths import *
import logging
from bs4 import BeautifulSoup
import requests
import urllib
from buhayra.location import *
import time
logging.basicConfig(format='%(message)s', level='INFO')
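# Offline products (archived in SciHub's Long Term Archive) are requested with api.download(uuid)
# and collected in `unavailable`; the final loop in main() polls every 10 minutes and downloads
# each one into sarIn once the OData endpoint reports it as Online.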
def main():
if len(sys.argv) < 2:
print(" Usage: python3 get_past_scenes.py [year] [month]")
return 1
api = SentinelAPI(username, password, 'https://scihub.copernicus.eu/dhus')
logging.info(api.api_url)
t0 = datetime(int(sys.argv[1]),int(sys.argv[2]),1,0,0,0)
tf = t0 + timedelta(days=12)
# search by polygon, time, and SciHub query keywords
footprint = geojson_to_wkt(read_geojson(home['parameters'] + '/extent_'+location['region']+'.geojson'))
products_s1a = api.query(footprint,
date=(
date(t0.year,t0.month,t0.day),
date(tf.year,tf.month,tf.day)
),
producttype="GRD",
platformname='Sentinel-1')
unavailable=[]
for uuid in products_s1a:
product_info = api.get_product_odata(uuid)
if any(product_info['title'] in s for s in os.listdir(sarIn)):
logging.info('Skipping '+uuid+'. Already exists in '+sarIn)
continue
logging.info('Is ' + uuid +' online?')
logging.info(product_info['Online'])
if not product_info['Online']:
logging.info('Requesting unavailable uuids')
api.download(uuid)
unavailable=unavailable + [uuid]
else:
logging.info('Downloading available uuids')
api.download(uuid,directory_path=sarIn)
logging.info('Sleeping 30 minutes (the API does not allow intensive requests)')
time.sleep(30*60)
while len(unavailable)>0:
for uuid in unavailable:
product_info = api.get_product_odata(uuid)
if product_info['Online']:
logging.info(uuid + ' is available! Downloading:')
api.download(uuid,directory_path=sarIn)
unavailable.remove(uuid)
time.sleep(600)
return 0
if __name__ == "__main__":
main()
```
#### File: buhayra/sar2watermask/metadata.py
```python
import psycopg2
from buhayra.getpaths import *
from buhayra.credentials import *
import buhayra.utils as utils
import os
import zipfile
import xmltodict
from datetime import datetime
import re
import logging
# ingestion_time = datetime.datetime.strptime('20181007T081706','%Y%m%dT%H%M%S')
# scene=utils.select_scene_ingestion_time(ingestion_time,sarIn)[0]
def insert_into_postgres(scenes):
logger = logging.getLogger('root')
logger.info("Connect to postgres with psycopg2")
conn = psycopg2.connect(host=postgis_host,dbname='watermasks',user=postgis_user,password=postgis_pass)
cur = conn.cursor()
INSERT = """INSERT INTO scene_"""+location['region']+""" (ingestion_time, mission_id, pass) VALUES (%(ingestion_time)s, %(mission_id)s, %(pass)s);"""
logger.info("Loop scenes")
for scene in scenes:
if not scene.endswith('.zip'):
continue
ingestion_time = datetime.strptime(scene.split('_')[4],'%Y%m%dT%H%M%S')
zip=zipfile.ZipFile(os.path.join(sarIn,scene))
contents=zip.namelist()
subs='vv-'+datetime.strftime(ingestion_time,'%Y%m%dt%H%M%S')
contents = [x for x in contents if len(x.split('/'))>2]
res = [x for x in contents if re.search(subs, x.split('/')[2]) and x.split('/')[1] == 'annotation']
xml=zip.read(res[0])
xdict = xmltodict.parse(xml)
logger.info("Insert metadata for "+scene)
cur.execute(INSERT,
{'table':'scene_'+location['region'],
'ingestion_time': ingestion_time,
'mission_id': xdict['product']['adsHeader']['missionId'],
'pass': xdict['product']['generalAnnotation']['productInformation']['pass']})
conn.commit()
cur.close()
conn.close()
logger.info("Finished insert")
```
#### File: buhayra/sar2watermask/sar.py
```python
from os import listdir
import os
import shutil
import datetime
import sys
import logging
from buhayra.getpaths import *
import buhayra.utils as utils
from buhayra.utils import getWMinScene, checknclean, geojson2shapely
import sar2watermask.metadata as metadata
import xml.etree.ElementTree
from snappy import Product
from snappy import GPF
from snappy import ProductIO
from snappy import jpy
from snappy import HashMap
from snappy import PixelPos
from snappy import GeoPos
from snappy import WKTReader
import json
import datetime
import subprocess
import re
import rasterio
import rasterio.mask
import fiona
from shutil import copyfile
from shapely.geometry import Polygon, shape
from shapely.ops import transform
import numpy as np
import time
System = jpy.get_type('java.lang.System')
BandDescriptor = jpy.get_type('org.esa.snap.core.gpf.common.BandMathsOp$BandDescriptor')
pid=os.getpid()
def sar2sigma_subset(scenes):
logger = logging.getLogger('root')
time0=time.process_time()
outForm='GeoTIFF+XML'
finished=0
with fiona.open(home['proj']+'/buhayra/auxdata/wm_utm_'+location['region']+'.gpkg','r') as wm:
for f in scenes:
logger.info("processing " + f)
logger.info("process ID: "+ str(pid))
product = ProductIO.readProduct(sarIn+"/"+f)
productName=product.getName()
open(sarIn+"/"+productName + '.processing','w').close()
# if (productName+".finished") in listdir(sarIn):
# logger.info("product "+productName+" already processed: skipping")
# continue
# logger.info("processing " + productName)
rect_utm=getBoundingBoxScene(product)
wm_in_scene,id_in_scene = getWMinScene(rect_utm,wm)
# product=orbit_correction(product)
product=remove_border_noise(product)
product=thermal_noise_removal(product)
product=calibration(product)
product=speckle_filtering(product)
product=geom_correction(product)
product=set_no_data_value(product)
logger.info("starting loop on reservoirs")
targetdir = os.path.join(sarOut,productName)
if not os.path.exists(targetdir):
os.mkdir(targetdir)
for i in range(0,len(id_in_scene)):
fname=productName + "_" + str(id_in_scene[i])
if (fname+".tif") in listdir(targetdir):
logger.debug("product "+fname+".tif already exists: skipping")
continue
logger.debug("subsetting product "+ str(id_in_scene[i]))
product_subset=subsetProduct(product,wm_in_scene[i])
logger.debug("writing product "+ str(id_in_scene[i]))
if os.path.exists(os.path.join(targetdir,fname + "_locked")):
os.remove(os.path.join(targetdir,fname + "_locked"))
ProductIO.writeProduct(product_subset,os.path.join(targetdir,fname + "_locked"),outForm)
product_subset.dispose()
compress_tiff(os.path.join(targetdir,fname+'_locked.tif'),os.path.join(targetdir,fname+'.tif'))
product.dispose()
open(sarIn+"/"+productName + '.finished','w').close()
os.remove(sarIn+"/"+productName + '.processing')
finished=finished+1
logger.info("**** " + f + " processed in "+str((time.process_time()-time0)/60)+" minutes****")
logger.info("**** processed " +str(finished)+" of "+ str(len(scenes))+" in loop ****")
System.gc()
metadata.insert_into_postgres(scenes)
logger.info("******************** finished loop: "+ str(len(scenes))+" scenes **")
def orbit_correction(product):
logger = logging.getLogger('root')
params = HashMap()
root = xml.etree.ElementTree.parse(home['parameters']+'/orbit_correction.xml').getroot()
for child in root:
params.put(child.tag,child.text)
result = GPF.createProduct('Apply-Orbit-File',params,product)
logger.info("finished orbit correction")
return(result)
def thermal_noise_removal(product):
logger = logging.getLogger('root')
params = HashMap()
root = xml.etree.ElementTree.parse(home['parameters']+'/thermal_noise.xml').getroot()
for child in root:
params.put(child.tag,child.text)
result = GPF.createProduct('ThermalNoiseRemoval',params,product)
logger.info("finished ThermalNoiseRemoval")
return(result)
### currently being performed with gpt
def thermal_noise_removal_gpt(product):
logger = logging.getLogger('root')
fname=product.getName()
logger.info("writing product for thermal noise removal")
ProductIO.writeProduct(product,sarIn+"/"+fname+'.dim',"BEAM-DIMAP")
logger.info("finished writing. proceeding with gpt ThermalNoiseRemoval")
product.dispose()
subprocess.call(['/users/stud09/martinsd/local/snap/bin/gpt',
'ThermalNoiseRemoval',
'-SsourceProduct='+sarIn+'/'+fname+'.dim',
'-PselectedPolarisations=VV',
'-PremoveThermalNoise=true',
'-t',
sarIn+'/'+fname+'.dim'])
result = ProductIO.readProduct(sarIn+"/"+fname + '.dim')
logger.info("finished thermal noise removal")
return(result)
def remove_border_noise(product):
logger = logging.getLogger('root')
params = HashMap()
root = xml.etree.ElementTree.parse(home['parameters']+'/border-noise.xml').getroot()
for child in root:
params.put(child.tag,child.text)
result = GPF.createProduct('Remove-GRD-Border-Noise',params,product)
logger.info("finished Remove-GRD-Border-Noise")
return(result)
def calibration(product):
logger = logging.getLogger('root')
params = HashMap()
root = xml.etree.ElementTree.parse(home['parameters']+'/calibration.xml').getroot()
for child in root:
params.put(child.tag,child.text)
result = GPF.createProduct('Calibration',params,product)
logger.info("finished calibration")
return(result)
def speckle_filtering(product):
## Speckle filtering
logger = logging.getLogger('root')
params = HashMap()
root = xml.etree.ElementTree.parse(home['parameters']+'/speckle_filtering.xml').getroot()
for child in root:
params.put(child.tag,child.text)
result = GPF.createProduct('Speckle-Filter',params,product)
logger.info("finished speckle filtering")
return(result)
def geom_correction(product):
## Geometric correction
logger = logging.getLogger('root')
params = HashMap()
root = xml.etree.ElementTree.parse(home['parameters']+'/terrain_correction.xml').getroot()
for child in root:
params.put(child.tag,child.text)
result = GPF.createProduct('Terrain-Correction',params,product)
# current_bands = CalSfCorr.getBandNames()
# logger.debug("Current Bands after Terrain Correction: %s" % (list(current_bands)))
logger.info("finished geometric correction")
return(result)
def set_no_data_value(product):
logger = logging.getLogger('root')
params = HashMap()
root = xml.etree.ElementTree.parse(home['parameters']+'/nodatavalue.xml').getroot()
for child in root:
params.put(child.tag,child.text)
result = GPF.createProduct('SetNoDataValue',params,product)
logger.info("finished set_no_data_value")
return(result)
def sigma_naught(product):
targetBands = jpy.array('org.esa.snap.core.gpf.common.BandMathsOp$BandDescriptor',1)
targetBand1 = BandDescriptor()
targetBand1.name = 'sigma_int'
targetBand1.type = 'Int32'
targetBand1.expression = 'round(log10(Sigma0_VV)*1000)'
targetBands[0] = targetBand1
parameters = HashMap()
parameters.put('targetBands', targetBands)
result = GPF.createProduct('BandMaths', parameters, product)
return(result)
def subsetProduct(product,pol):
# if pol.area<1000:
buff=pol.buffer((pol.area)**0.5)
# else:
# buff=pol.buffer((pol.area)**0.5)
bb=getBoundingBoxWM(buff)
bb_ll=utils.utm2wgs(bb)
geom = WKTReader().read(bb_ll.wkt)
parameters = HashMap()
parameters.put('copyMetadata', True)
parameters.put('geoRegion', geom)
product_subset = GPF.createProduct('Subset', parameters, product)
return(product_subset)
def getBoundingBoxScene(product):
logger = logging.getLogger('root')
gc=product.getSceneGeoCoding()
rsize=product.getSceneRasterSize()
h=rsize.getHeight()
w=rsize.getWidth()
p1=gc.getGeoPos(PixelPos(0,0),None)
p2=gc.getGeoPos(PixelPos(0,h),None)
p3=gc.getGeoPos(PixelPos(w,h),None)
p4=gc.getGeoPos(PixelPos(w,0),None)
rect=Polygon([(p1.getLon(),p1.getLat()),(p2.getLon(),p2.getLat()),(p3.getLon(),p3.getLat()),(p4.getLon(),p4.getLat())])
rect_utm=utils.wgs2utm(rect)
return(rect_utm)
def getBoundingBoxWM(pol):
coords=pol.bounds
bb=Polygon([(coords[0],coords[1]),(coords[0],coords[3]),(coords[2],coords[3]),(coords[2],coords[1])])
return(bb)
def geojson2wkt(jsgeom):
from shapely.geometry import shape,polygon
polygon=shape(jsgeom)
return(polygon.wkt)
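# compress_tiff (below) stores backscatter as hundredths of a dB (10*log10(sigma0)*100),
# using int16 when the range allows and int32 otherwise, with the dtype minimum as nodata;
# the GDAL geotransform goes to a sidecar .json and the uncompressed input (plus its .xml) is removed.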
def compress_tiff(inpath,outpath):
with rasterio.open(inpath,'r') as ds:
r=ds.read(1)
r[r==0]=np.nan
r_db=10*np.log10(r)*100
if (np.nanmax(r_db)< np.iinfo(np.int16).max) and (np.nanmin(r_db) > (np.iinfo(np.int16).min+1)):
r_db[np.isnan(r_db)]=np.iinfo(np.int16).min
r_db=np.int16(r_db)
else:
r_db[np.isnan(r_db)]=np.iinfo(np.int32).min
r_db=np.int32(r_db)
gdalParam=ds.transform.to_gdal()
with rasterio.open(outpath,'w',driver=ds.driver,height=ds.height,width=ds.width,count=1,dtype=r_db.dtype) as dsout:
dsout.write(r_db,1)
with open(outpath[:-3]+'json', 'w') as fjson:
json.dump(gdalParam, fjson)
os.remove(inpath)
os.remove(inpath[:-3]+'xml')
``` |
{
"source": "jmihali/dory",
"score": 2
} |
#### File: dory/Frontend/PULP_node.py
```python
import logging
class node_element():
# A node allocated in the PULP_Graph
def __init__(self):
self.name = 'Not-initialized'
self.kernel_shape = 'Not-initialized' # fH x fW
self.ch_in = 'Not-initialized'
self.ch_out = 'Not-initialized'
self.input_index = 'Not-initialized'
self.output_index = 'Not-initialized'
self.input_dim = 'Not-initialized' # H x W
self.output_dim = 'Not-initialized' # H x W
self.pads = 'Not-initialized' # Top, Left, Bottom, Right
self.branch_out = 0
self.branch_in = 0
self.branch_change = 0
self.branch_last = 0
self.input_activation_dimensions_L3 = 0
self.output_activation_dimensions_L3 = 0
self.inmul1 = 'empty'
self.inmul2 = 'empty'
self.weight_bits = 8
self.out_activation_bits = 8
self.input_activation_bits = 8
self.outshift = 0
self.out_add = 0 # used for pool nodes
def log_parameters(self):
for parameter in self.__dict__:
if parameter not in ['weights', 'k', 'lambda']:
logging.debug(parameter + ': ' + str(self.__dict__[parameter]))
else:
logging.debug(parameter + ': Present')
def print_parameters(self):
for parameter in self.__dict__:
if parameter not in ['weights', 'k', 'lambda']:
print(parameter + ': ' + str(self.__dict__[parameter]))
else:
print(parameter + ': Present')
def add_parameter(self, name, value):
self.__dict__[name] = value
def add_dict_parameter(self, dict_parameters):
for key, value in dict_parameters.items():
self.__dict__[key] = value
def get_parameter(self, name):
return self.__dict__[name]
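# Minimal usage sketch (hypothetical layer values):
#   node = node_element()
#   node.add_parameter('name', 'Conv0')
#   node.add_dict_parameter({'ch_in': 3, 'ch_out': 16, 'kernel_shape': [3, 3]})
#   node.get_parameter('ch_out')   # -> 16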
```
#### File: Frontend/Quantlab/QUANTLAB_Onnx.py
```python
import onnx
from onnx import numpy_helper
from onnx import helper, shape_inference
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import pandas as pd
from collections import OrderedDict
import logging
import PULP_node as pulp
from ONNX_management import ONNX_management
class Quantlab_onnx(ONNX_management):
# Used to manage the ONNX files. Currently supported: Convolutions (PW and DW), Pooling, Fully Connected and Relu.
def __init__(self, onnx, platform):
layers_accepted = ['Conv', 'Pad', 'Mul', 'Add', 'Div', 'Constant', 'AveragePool', 'GlobalAveragePool', 'MaxPool', 'Cast', 'Clip', 'Floor', 'Flatten', 'Gemm', 'MatMul', 'Shape', 'Gather', 'Unsqueeze', 'Concat', 'Reshape', 'Sigmoid', 'LogSoftmax']
layers_neglected = ['Cast', 'Floor', 'Flatten', 'Shape', 'Gather', 'Unsqueeze', 'Concat', 'Reshape', 'Sigmoid', 'LogSoftmax']
layers_to_node = ['AveragePool', 'MaxPool', 'Conv', 'Gemm', 'MatMul', 'GlobalAveragePool', 'Add']
backend = ['ConvBNRelu', 'ConvRelu', 'ConvDWBNRelu', 'ConvDWRelu', 'AveragePool', 'GlobalAveragePool', 'MaxPool', 'LinearBNRelu', 'GemmRelu', 'Gemm', 'MatMulRelu', 'MatMul', 'Add', 'AddBNRelu', 'BNReluAddBNRelu',
'PadConvBNRelu', 'PadConvRelu', 'PadConvDWBNRelu', 'PadConvDWRelu', 'PadAveragePool', 'PadGlobalAveragePool', 'PadMaxPool', 'PadLinearBNRelu', 'PadGemmRelu', 'PadGemm', 'PadMatMulRelu', 'PadMatMul',
'PadAdd', 'PadAddBNRelu', 'PadBNReluAddBNRelu',
'AveragePoolBNRelu', 'AveragePoolRelu', 'GlobalAveragePoolBNRelu', 'GlobalAveragePoolRelu',]
rules = {}
rules['Relu'] = 'Mul-Div-Floor-Clip'
rules['BNRelu'] = 'Mul-Add-Div-Floor-Clip'
rules['Pad'] = 'Pad'
ONNX_management.__init__(self, onnx, platform, backend, rules, layers_accepted, layers_neglected, layers_to_node)
def apply_rule(self, node, rule):
pulp_node = pulp.node_element()
out = node.output[0]
nodes_to_search = rule.split('-')
blocks_to_search = len(nodes_to_search)
i = 0
for key, value in self.rules.items():
if value == rule:
break
pulp_node.add_parameter('name', key)
if rule in [self.rules['Relu'], self.rules['BNRelu']]:
for n_idx, node_iterating in enumerate(self.model.graph.node):
if (out == node_iterating.output[0] or i > 0) and node_iterating.op_type == nodes_to_search[i] and i < blocks_to_search:
if i == 0:
pulp_node.add_parameter('input_index',[input_i for input_i in node_iterating.input if 'weight' not in input_i][0])
elif i == (blocks_to_search-1):
pulp_node.add_parameter('output_index',node_iterating.output[0])
if node_iterating.op_type in ['Mul', 'Add', 'Div']:
const = self.search_constant(node_iterating.input[1], self.model)
if isinstance(const, str):
const = self.search_constant(node_iterating.input[0], self.model)
assert (not(isinstance(const, str))), f"Error in searching BNRelu parameters"
if node_iterating.op_type == 'Mul' and rule == self.rules['BNRelu']:
pulp_node.add_parameter('k', const)
pulp_node.add_parameter('outmul', 1)
# pulp_node.add_parameter('outmul', const) # this
# will later be divided by kernel area (i.e.,
# k[0]*k[1]) in the case of avgpool
elif node_iterating.op_type == 'Mul' and rule == self.rules['Relu']:
pulp_node.add_parameter('outmul', const)
elif node_iterating.op_type == 'Add':
pulp_node.add_parameter('lambda', const)
elif node_iterating.op_type == 'Div':
try:
const[0]
pulp_node.add_parameter('outshift',round(np.log2(const[0])))
except:
pulp_node.add_parameter('outshift',round(np.log2(const)))
elif node_iterating.op_type in ['Clip']:
attributes_names = [attribute.name for attribute in node_iterating.attribute]
for attribute in node_iterating.attribute:
if attribute.name == 'out_bits':
pulp_node.add_parameter('out_activation_bits', attribute.i)
i+=1
if i >= blocks_to_search:
break
elif rule == self.rules['Pad']:
pulp_node.add_parameter('name', key)
for node_iterating in (self.model.graph.node):
if out == node_iterating.output[0] and node_iterating.op_type == nodes_to_search[i] and i < blocks_to_search:
inp = []
for input_i in node_iterating.input:
if 'weight' not in input_i:
if input_i not in [node.output[0] for node in self.model.graph.node if node.op_type in 'Constant']:
inp.append(input_i)
pulp_node.add_parameter('input_index', inp[0])
pulp_node.add_parameter('output_index',node_iterating.output[0])
if np.array(node_iterating.attribute[1].ints).shape[0] == 8:
pulp_node.add_parameter('pads',[node_iterating.attribute[1].ints[2],node_iterating.attribute[1].ints[3],node_iterating.attribute[1].ints[6],node_iterating.attribute[1].ints[7]])
elif np.array(node_iterating.attribute[1].ints).shape[0] == 6:
pulp_node.add_parameter('pads',[0, node_iterating.attribute[1].ints[2], 0, node_iterating.attribute[1].ints[5]])
break
return pulp_node
def fuse_graph(self):
# Fuse adjacent nodes of the PULP graph according to the backend-supported layer patterns
while True:
PULP_Nodes_Graph_fused = []
skip = 0
not_fused = 1
fuse_at_least_1 = 0
for node_1, node_2 in zip(self.PULP_Nodes_Graph[:-1], self.PULP_Nodes_Graph[1:]):
last_node = 0
if node_1.name+node_2.name in '.'.join([*self.backend]) and skip == 0:
PULP_Nodes_Graph_fused.append(self.fuse_nodes(node_1, node_2))
skip = 1
not_fused = 0
fuse_at_least_1 = 1
elif skip == 0:
PULP_Nodes_Graph_fused.append(node_1)
not_fused = 1
else:
skip = 0
last_node = 1
if not_fused == 1 or last_node == 1:
PULP_Nodes_Graph_fused.append(node_2)
self.PULP_Nodes_Graph = PULP_Nodes_Graph_fused
if fuse_at_least_1 == 0:
break
self.fuse_graph_BNReluADD()
def fuse_nodes(self, node_1, node_2):
assert (node_1.get_parameter('output_index') == node_2.get_parameter('input_index')), f"Error in fusion of near nodes with different indexes"
node_1.add_parameter('name', node_1.get_parameter('name')+node_2.get_parameter('name') )
for key, value in node_2.__dict__.items():
if (isinstance(value,str)):
if value == 'Not-initialized':
pass
elif key not in ['name', 'input_index']:
node_1.add_parameter(key,value)
elif key in ['pads']:
node_1_pads = node_1.get_parameter('pads')
node_2_pads = node_2.get_parameter('pads')
for i in range(len(node_2_pads)):
node_1_pads[i] += node_2_pads[i]
node_1.add_parameter('pads', node_1_pads)
elif key in ['branch_in']:
node_1.add_parameter('branch_in', node_1.get_parameter('branch_in') + node_2.get_parameter('branch_in'))
elif key in ['branch_out']:
node_1.add_parameter('branch_out', node_1.get_parameter('branch_out') + node_2.get_parameter('branch_out'))
elif key in ['branch_change']:
node_1.add_parameter('branch_change', node_1.get_parameter('branch_change') + node_2.get_parameter('branch_change'))
elif key in ['branch_last']:
node_1.add_parameter('branch_last', node_1.get_parameter('branch_last') + node_2.get_parameter('branch_last'))
elif key not in ['name', 'input_index', 'input_dim', 'weight_bits']:
node_1.add_parameter(key,value)
elif key in ['input_dim']:
if 'input' not in node_1.get_parameter('input_index') and '0' != node_1.get_parameter('input_index'):
value[0] = value[0]-node_1.get_parameter('pads')[0]-node_1.get_parameter('pads')[2]
value[1] = value[1]-node_1.get_parameter('pads')[1]-node_1.get_parameter('pads')[3]
node_1.add_parameter(key,value)
if 'AveragePool' in node_1.name and 'Relu' in node_2.name:
ks_tot = 1
# "ReLU" nodes don't have the k attribute
try:
kappa = node_2.k
except AttributeError:
kappa = 1
for k in node_1.kernel_shape:
ks_tot *= k
# the multiplier must include division by total kernel area
node_1.outmul = int(np.round(node_2.outmul*kappa/(ks_tot)))
# move "lambda" to scalar out_add parameter here
node_1.out_add = int(node_2.get_parameter('lambda'))
node_1.requant_pool = True
return node_1
def fuse_Add(self, node_1, node_2):
assert (node_1.get_parameter('output_index') == node_2.get_parameter('input_index')), f"Error in fusion of near nodes with different indexes"
node_2.add_parameter('name', node_1.get_parameter('name')+node_2.get_parameter('name') )
node_2.add_parameter('input_index', node_1.get_parameter('input_index'))
return node_2
def fuse_graph_BNReluADD(self):
BNRelu_fused = []
for j, node_1 in enumerate(self.PULP_Nodes_Graph):
if node_1.name == 'BNRelu':
for i, node_2 in enumerate(self.PULP_Nodes_Graph):
if node_1.name+node_2.name in '.'.join([*self.backend]) and node_1.output_index == node_2.input_index:
self.PULP_Nodes_Graph[i] = self.fuse_Add(node_1, node_2)
BNRelu_fused.append(j)
break
PULP_Nodes_Graph_fused = []
for j, node in enumerate(self.PULP_Nodes_Graph):
if j not in BNRelu_fused:
PULP_Nodes_Graph_fused.append(node)
self.PULP_Nodes_Graph = PULP_Nodes_Graph_fused
```
#### File: dory/NN_Deployment/Model_deployment.py
```python
import torch
import numpy as np
from tiler import Tiler
import Network_template_writer as Network_writer
import Makefile_writer as Makefile_writer
import os
import pandas as pd
from mako.template import Template
from collections import OrderedDict
import logging
class Model_deployment():
"""
    Used to manage the PULP graph. Currently supports Convolution, Pooling, Linear and ReLU layers.
"""
def __init__(self, platform, chip):
self.platform = platform
self.chip = chip
def copy_files(self, optional, layer_mixed_list,version, sdk, backend, dma_parallelization):
print("The function copy_files should be implemented in the target Backend. Exiting ...")
def copy_backend(self, BitActivation, PULP_Nodes_Graph, number_of_deployed_layers, sdk, backend, dma_parallelization, optional):
print("The function copy_backend should be implemented in the target Backend. Exiting ...")
exit(0)
def create_weights_files(self, PULP_Nodes_Graph, number_of_deployed_layers, BitActivation, load_dir):
print("The function create_weights_files should be implemented in the target Backend. Exiting ...")
exit(0)
def create_layers_tiling(self, PULP_Nodes_Graph,
number_of_deployed_layers,
L1_dimension,
l2_buffer_size,
BitActivation,
performance_single_layer,
sdk,
backend,
dma_parallelization,
number_of_clusters,
optional,
type_data = 'float'):
####################################################################################
        ###### SECTION 3: PARSE EACH LAYER INDEPENDENTLY. TILING + LAYER CREATION ######
####################################################################################
name_list = []
layer_list = []
stringa_features = []
name_layer_list = []
name_layer_list_internal = []
MAC_total = 0
Layers_L3_input_act = 0
Layers_L3_output_act = 0
Layers_L3_weights = 0
L2_memory_occupation = 0
factor_h_out = 1
if optional == 'auto':
optional = '8bit'
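        # switch to the mixed-precision kernels when any activation or weight tensor uses fewer than 8 bits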
for i, nodes_to_deploy in enumerate(PULP_Nodes_Graph[:number_of_deployed_layers]):
if nodes_to_deploy.get_parameter('out_activation_bits') < 8 or nodes_to_deploy.get_parameter('input_activation_bits') < 8 or nodes_to_deploy.get_parameter('weight_bits') < 8:
optional = 'mixed-sw'
else:
pass
for i, nodes_to_deploy in enumerate(PULP_Nodes_Graph[:number_of_deployed_layers]):
if('Conv' in nodes_to_deploy.name or 'Gemm' in nodes_to_deploy.name or 'MatMul' in nodes_to_deploy.name):
layer = 'Conv'
if 'Conv' in nodes_to_deploy.name:
h_dimension = nodes_to_deploy.get_parameter('kernel_shape')[0] + nodes_to_deploy.get_parameter('input_dim')[0] + nodes_to_deploy.get_parameter('output_dim')[0]
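                # kernel, input and output heights sum to 3 only when all three are 1, i.e. the layer is effectively a 1D convolution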
if h_dimension == 3 and 'mixed' not in optional:
layer = 'Conv1D'
optional = '1D_Conv'
elif('Pool' in nodes_to_deploy.name):
layer = 'Pool'
elif('Add' in nodes_to_deploy.name):
layer = 'Add'
name_layer = "layer" + nodes_to_deploy.name + str(i)
            ######################## NEEDS A FIX ###################################################
            #### otherwise only weights smaller than L2/2 are kept in L2 --> forces more L3 tiling than needed ####
            #########################################################################################
tile_factor = 1.8
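            # Reserve L2 space for the next layer's weights: capped at l2_buffer_size/1.8, otherwise the exact
            # weight size of the next layer plus two per-channel values of BitActivation bits each.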
if (i < len(PULP_Nodes_Graph)-1) and ('Conv' in PULP_Nodes_Graph[i+1].name or 'Gemm' in PULP_Nodes_Graph[i+1].name or 'MatMul' in PULP_Nodes_Graph[i+1].name):
if PULP_Nodes_Graph[i+1].ch_in*PULP_Nodes_Graph[i+1].ch_out*PULP_Nodes_Graph[i+1].kernel_shape[0]*PULP_Nodes_Graph[i+1].kernel_shape[1] > int(l2_buffer_size/tile_factor):
weight_overhead = int(l2_buffer_size/tile_factor)
else:
weight_overhead = int(PULP_Nodes_Graph[i+1].weight_bits*PULP_Nodes_Graph[i+1].ch_in*PULP_Nodes_Graph[i+1].ch_out*PULP_Nodes_Graph[i+1].kernel_shape[0]*PULP_Nodes_Graph[i+1].kernel_shape[1]/8) +int(PULP_Nodes_Graph[i+1].ch_out*BitActivation/8*2)
else:
weight_overhead = 0
BitIn = PULP_Nodes_Graph[i].input_activation_bits
BitOut = PULP_Nodes_Graph[i].out_activation_bits
if 'weights' in PULP_Nodes_Graph[i].__dict__:
BitW = PULP_Nodes_Graph[i].weight_bits
if i == len(PULP_Nodes_Graph)-1:
name_layer = name_layer + '_last'
if(performance_single_layer == 'Yes'):
test_location = 'L3+performance'
else:
test_location = 'L3'
tile_gen = Tiler(layer,
nodes_to_deploy.ch_out,
nodes_to_deploy.kernel_shape,
nodes_to_deploy.strides,
nodes_to_deploy.pads,
nodes_to_deploy.group,
[nodes_to_deploy.ch_in * nodes_to_deploy.group,nodes_to_deploy.input_dim[0], nodes_to_deploy.input_dim[1]],
L1_dimension,
l2_buffer_size-weight_overhead,
self.platform,
self.chip,
test_location=test_location,
BitIn=BitIn,
BitW=BitW,
BitOut=BitOut,
BitActivation = BitActivation,
optional_type=optional,
sdk = sdk,
backend = backend,
dma_parallelization = dma_parallelization,
number_of_clusters = number_of_clusters)
str_l = 'ch_in' + str(nodes_to_deploy.ch_in) + 'ch_out' + str(nodes_to_deploy.ch_out) + 'groups' + str(
nodes_to_deploy.group) + 'dim_image' + str(nodes_to_deploy.input_dim[1],) + 'pads' + ''.join([str(x) for x in nodes_to_deploy.pads]) +'stride' + str(nodes_to_deploy.strides) + 'kernel'+ str(
nodes_to_deploy.kernel_shape[0]) + str(nodes_to_deploy.kernel_shape[1]) + 'BitIn' + str(BitIn) + 'BitOut' + str(BitOut) + 'BitW' + str(BitW)
if '1D' in layer:
str_l += 'Dilation' + str(nodes_to_deploy.dilations)
name = nodes_to_deploy.name
for scan_i, _ in enumerate(stringa_features):
if(str_l == stringa_features[scan_i] and str(layer) == str(layer_list[scan_i])):
name_layer = name_layer_list[scan_i]
name = name_layer_list_internal[scan_i]
stringa_features.append(str_l)
layer_list.append(layer)
name_layer_list.append(name_layer)
name_layer_list_internal.append(name)
relu = 0
BN = 0
DW = 0
input_dim_constraint = 0
output_weights_dim_constraint = 0
if(i == 0):
weight_constraint = 0
if(i == 0):
input_L3 = 0
elif(factor_h_out > 1):
input_L3 = 1
input_dim_constraint = out_dim2
output_weights_dim_constraint = l2_buffer_size - weight_overhead - out_dim2_old
if(output_weights_dim_constraint < 0):
print("ERROR 03. Problems with current implementation on L3 tiling. Prediction of weights of next layer not accurate. Exiting...")
os._exit(0)
else:
input_L3 = 0
if('Relu' in nodes_to_deploy.name):
relu = 1
if('BN' in nodes_to_deploy.name):
BN = 1
if('DW' in nodes_to_deploy.name):
DW = 1
###### TO MODIFY ########
if 'Relu' not in nodes_to_deploy.name:
nodes_to_deploy.outmul = 1
if 'bias' in nodes_to_deploy.__dict__:
h_b = 1
else:
h_b = 0
if('Conv1D' in layer):
d = dict(X=0, Y=0, W=0,
relu=relu, BN=BN,
type_data = type_data,
dilation=nodes_to_deploy.dilations,
has_bias=h_b,
out_mul=nodes_to_deploy.outmul,
out_shift=nodes_to_deploy.outshift,
name=name_layer)
elif('Gemm' in nodes_to_deploy.name or 'Conv' in nodes_to_deploy.name or 'MatMul' in nodes_to_deploy.name):
d = dict(X=0, Y=0, W=0,
relu=relu, BN=BN, DW=DW,
type_data = type_data,
has_bias=h_b,
out_mul=nodes_to_deploy.outmul,
out_shift=nodes_to_deploy.outshift,
name=name_layer,
input_L3 = input_L3,
input_dim_constraint = input_dim_constraint,
output_weights_dim_constraint = output_weights_dim_constraint,
weight_constraint = weight_constraint)
elif('Pool' in nodes_to_deploy.name):
d = dict(X=0, Y=0, W=0,
relu=relu, BN = BN,
type_data = type_data,
out_mul=nodes_to_deploy.outmul,
out_shift=nodes_to_deploy.outshift,
out_add = nodes_to_deploy.out_add,
name=name_layer,
input_L3 = input_L3,
input_dim_constraint = input_dim_constraint,
output_weights_dim_constraint = output_weights_dim_constraint,
type=name)
elif('Add' in nodes_to_deploy.name):
d = dict(X=0, Y=0, W=0,
relu=relu,
type_data = type_data,
out_mul1=nodes_to_deploy.inmul1,
out_mul2=nodes_to_deploy.inmul2,
out_shift=nodes_to_deploy.outshift,
name=name_layer,
type=name)
in_dim2, out_dim2, weights_dim, l1_dim2, L3_tiling, factor_ch_out, factor_h_out, factor_h_in = tile_gen.get_tiling(**d)
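            # factor_ch_out / factor_h_in / factor_h_out greater than 1 means the weights / input activations /
            # output activations do not fit in L2 and are tiled from L3 along that dimension.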
if(factor_ch_out > 1):
PULP_Nodes_Graph[i].L3_allocation = 1
else:
PULP_Nodes_Graph[i].L3_allocation = 0
Layers_L3_input_act += int(factor_h_in > 1)
Layers_L3_output_act += int(factor_h_out > 1)
Layers_L3_weights += int(factor_ch_out > 1)
PULP_Nodes_Graph[i].L3_input = int(factor_h_in > 1)
PULP_Nodes_Graph[i].L3_output = int(factor_h_out > 1)
PULP_Nodes_Graph[i].L3_weights = int(factor_ch_out > 1)
if(i == 0):
out_dim2_old = in_dim2
if(factor_h_out > 1):
out_dim2 = l2_buffer_size - weight_overhead - out_dim2_old - weights_dim
out_dim2_old = int(out_dim2*BitOut/8)
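            # round the weight buffer size up to the next multiple of 4 bytes (word alignment)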
while weights_dim % 4 != 0:
weights_dim += 1
if(weight_overhead == int(l2_buffer_size/2)):
weight_constraint = int(l2_buffer_size/2)
else:
weight_constraint = 0
if(L3_tiling == 1):
name_layer = name_layer + 'L3'
try:
PULP_Nodes_Graph[i].input_activation_dimensions_L3 = int(PULP_Nodes_Graph[i].input_dim[0] * PULP_Nodes_Graph[i].input_dim[1] * PULP_Nodes_Graph[i].ch_in*BitIn/8)
except:
PULP_Nodes_Graph[i].input_activation_dimensions_L3 = int(PULP_Nodes_Graph[i].input_dim * PULP_Nodes_Graph[i].ch_in*BitIn/8)
try:
PULP_Nodes_Graph[i].output_activation_dimensions_L3 = int(PULP_Nodes_Graph[i].output_dim[0] * PULP_Nodes_Graph[i].output_dim[1] * PULP_Nodes_Graph[i].ch_out*BitOut/8)
except:
PULP_Nodes_Graph[i].output_activation_dimensions_L3 = int(PULP_Nodes_Graph[i].output_dim * PULP_Nodes_Graph[i].ch_out*BitOut/8)
name_list.append(name_layer)
if('Gemm' in nodes_to_deploy.name or 'Conv' in nodes_to_deploy.name or 'MatMul' in nodes_to_deploy.name):
if(i > 0):
PULP_Nodes_Graph[i].weights_dimension = PULP_Nodes_Graph[i-1].weights_dimension + weights_dim
else:
PULP_Nodes_Graph[i].weights_dimension = weights_dim
else:
PULP_Nodes_Graph[i].weights_dimension = PULP_Nodes_Graph[i-1].weights_dimension
if('Gemm' in nodes_to_deploy.name or 'Conv' in nodes_to_deploy.name or 'MatMul' in nodes_to_deploy.name):
if(factor_ch_out == 1):
if(i > 0):
PULP_Nodes_Graph[i].weights_dimension_L3 = PULP_Nodes_Graph[i-1].weights_dimension_L3 + weights_dim
else:
PULP_Nodes_Graph[i].weights_dimension_L3 = weights_dim
else:
if(i > 0):
PULP_Nodes_Graph[i].weights_dimension_L3 = PULP_Nodes_Graph[i-1].weights_dimension_L3 + int(weights_dim*factor_ch_out/2)
else:
PULP_Nodes_Graph[i].weights_dimension_L3 = int(weights_dim*factor_ch_out/2)
else:
PULP_Nodes_Graph[i].weights_dimension_L3 = PULP_Nodes_Graph[i-1].weights_dimension_L3
PULP_Nodes_Graph[i].input_activation_dimensions = int(in_dim2*BitIn/8)
if(factor_h_out > 1):
PULP_Nodes_Graph[i].output_activation_dimensions = int(out_dim2)
else:
PULP_Nodes_Graph[i].output_activation_dimensions = int(out_dim2*BitOut/8)
if(i > 0):
if(PULP_Nodes_Graph[i].input_activation_dimensions != PULP_Nodes_Graph[i-1].output_activation_dimensions) and PULP_Nodes_Graph[i-1].L3_output==1:
PULP_Nodes_Graph[i].input_activation_dimensions = PULP_Nodes_Graph[i-1].output_activation_dimensions
PULP_Nodes_Graph[i].l1_dimensions = l1_dim2
if('Pool' not in nodes_to_deploy.name):
MAC_total += nodes_to_deploy.MACs
return PULP_Nodes_Graph, Layers_L3_input_act, Layers_L3_output_act, Layers_L3_weights, name_layer_list, name_list, MAC_total
def generate_intermediate_activations(self, PULP_Nodes_Graph,
load_dir,
number_of_deployed_layers,
check_layer,
weights_to_write):
######################################################################################
###### SECTION 4: GENERATE CHECKSUM BY USING WEIGHT AND OUT_LAYER{i}.TXT FILES ######
######################################################################################
try:
x_in = pd.read_csv(os.path.join(load_dir, 'input.txt'))
x_in = x_in.values[:, 0].astype(int)
except:
print(f"========= WARNING ==========\nInput file {os.path.join(load_dir, 'input.txt')} not found; generating random inputs!")
x_in = torch.Tensor(1, PULP_Nodes_Graph[0].group, PULP_Nodes_Graph[0].ch_in, PULP_Nodes_Graph[0].input_dim[0], PULP_Nodes_Graph[0].input_dim[1]).uniform_(0, (2**(9)))
x_in[x_in > (2**8 - 1)] = 0
x_in = torch.round(x_in)
x_in = x_in.flatten().numpy().astype(int)
for i, _ in enumerate(x_in):
x_in[i] = np.uint8(x_in[i])
PULP_Nodes_Graph[0].check_sum_in = sum(x_in)
f_w = 0
for f, nodes_to_deploy in enumerate(PULP_Nodes_Graph[:number_of_deployed_layers]):
X_in = pd.read_csv(os.path.join(load_dir, f'out_layer{f}.txt'))
X_in = X_in.values[:, 0].astype(int)
if f == len(PULP_Nodes_Graph[:number_of_deployed_layers]) - 1:
class_out = int(np.where(X_in == np.max(X_in))[0][0])
for i, _ in enumerate(X_in):
X_in[i] = np.uint8(X_in[i])
BitIn = nodes_to_deploy.input_activation_bits
BitOut = nodes_to_deploy.out_activation_bits
Input_compressed = []
z = 0
import copy
Loop_over = copy.deepcopy(X_in)
if f != len(PULP_Nodes_Graph[:number_of_deployed_layers]) - 1:
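                # Pack sub-byte activations into bytes: e.g. with BitOut = 4 two values share one byte and the
                # second is shifted left by 4 bits; with BitOut = 8 each value keeps its own byte.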
for _, i_x in enumerate(Loop_over):
if (z % int(8 / BitOut)) == 0:
Input_compressed.append(int(i_x.item()))
else:
Input_compressed[-1] += int(i_x.item()) << (BitOut * (z % int(8 / BitOut)))
z += 1
if check_layer == f:
act_compare = Input_compressed
PULP_Nodes_Graph[f].check_sum_out = sum(Input_compressed)
if f == len(PULP_Nodes_Graph) - 1:
if 'Gemm' in nodes_to_deploy.name or 'MatMul' in nodes_to_deploy.name:
ww = np.asarray(nodes_to_deploy.weights_raw).reshape(nodes_to_deploy.ch_out,nodes_to_deploy.ch_in).astype(np.int8).astype(int)
X_in = pd.read_csv(os.path.join(load_dir, f'out_layer{f-1}.txt'))
X_out = pd.read_csv(os.path.join(load_dir, f'out_layer{f}.txt'))
X_in = X_in.values[:, 0].astype(int).reshape(X_in.shape[0],1)
try:
PULP_Nodes_Graph[f].check_sum_out = sum(sum(np.matmul(ww,X_in)))
except:
PULP_Nodes_Graph[f].check_sum_out = 0
if f != len(PULP_Nodes_Graph[:number_of_deployed_layers]) - 1:
PULP_Nodes_Graph[f + 1].check_sum_in = sum(Input_compressed)
if 'Gemm' in nodes_to_deploy.name or 'Conv' in nodes_to_deploy.name or 'MatMul' in nodes_to_deploy.name:
PULP_Nodes_Graph[f].check_sum_w = int(sum(weights_to_write[f_w]))
f_w += 1
else:
PULP_Nodes_Graph[f].check_sum_w = 0
return PULP_Nodes_Graph, class_out
def print_model_network(self, PULP_Nodes_Graph,
number_of_deployed_layers=29,
load_dir='./mnistNet/',
check_layer=0,
verbose_level='None',
performance_single_layer='Yes',
L1_dimension = 35000,
master_stack = 4096,
slave_stack = 3072,
l2_buffer_size = 400000,
fc_frequency = 100000000,
cl_frequency = 100000000,
BitActivation = 32,
sdk='gap_sdk',
backend='MCU',
dma_parallelization='8-cores',
number_of_clusters = 1,
optional = 'auto',
type_data = 'char'):
# Function used to create all the files for the application
        # copy_backend copies all the backend source files into the application folder
self.copy_backend(BitActivation, PULP_Nodes_Graph, number_of_deployed_layers, sdk, backend, dma_parallelization, optional)
        # create L3 files for the weights: .hex files that are later copied into HyperFlash
PULP_Nodes_Graph, weights_files_list, weights_to_write = self.create_weights_files(PULP_Nodes_Graph, number_of_deployed_layers, BitActivation, load_dir)
fileh = logging.FileHandler('logs/Tiling_profiling.log', 'a')
formatter = logging.Formatter('%(asctime)s - %(message)s')
fileh.setFormatter(formatter)
fileh.setLevel(logging.DEBUG)
log = logging.getLogger()
for hdlr in log.handlers[:]:
log.removeHandler(hdlr)
log.addHandler(fileh)
print("Creating tiling profiling in Tiling_profling.log")
        # tile all the layers: performs both the tiling and the layer code generation
PULP_Nodes_Graph, num_L3_input_tile, num_L3_output_tile, num_L3_weight_tile, name_layer_list, name_list, MAC_total = self.create_layers_tiling(PULP_Nodes_Graph,
number_of_deployed_layers,
L1_dimension,
l2_buffer_size,
BitActivation,
performance_single_layer,
sdk,
backend,
dma_parallelization,
number_of_clusters,
optional,
type_data = type_data)
logging.debug(" ")
logging.debug(" Layers with L3 input activation: " + str(num_L3_input_tile))
logging.debug(" Layers with L3 output activation: " + str(num_L3_output_tile))
logging.debug(" Layers with L3 weights: " + str(num_L3_weight_tile))
name_layer_list_unique = list(set(name_layer_list))
for i, _ in enumerate(name_layer_list_unique):
name_layer_list_unique[i] = name_layer_list_unique[i] + ".c"
for i, nodes_to_deploy in enumerate(PULP_Nodes_Graph[:number_of_deployed_layers]):
if nodes_to_deploy.L3_allocation == 1:
name_layer_list_unique.append(name_layer_list[i] + "L3" + ".c")
        # compute the checksums used to verify the intermediate activations
        act_compare = np.asarray([0, 0])  # default, so act_compare is always defined even when a specific check_layer is selected
if 'Check' in verbose_level or 'Last' in verbose_level:
PULP_Nodes_Graph, class_out = self.generate_intermediate_activations(PULP_Nodes_Graph,
load_dir,
number_of_deployed_layers,
check_layer,
weights_to_write)
else:
class_out = 0
if check_layer == 100:
act_compare = np.asarray([0, 0])
act_size = [0, 0, 0]
else:
act_size = [PULP_Nodes_Graph[check_layer].output_dim[0], PULP_Nodes_Graph[check_layer].output_dim[1], PULP_Nodes_Graph[check_layer].ch_out]
        # print the network file; it calls all the layer functions
Network_writer.print_template_network(
weights_files_list,
PULP_Nodes_Graph[:number_of_deployed_layers],
'char',
name=name_list,
test=True,
has_bias=True,
verbose_level=verbose_level,
performance_single_layer = performance_single_layer,
check_layer=check_layer,
act_compare=act_compare,
act_size=act_size,
class_out=class_out,
l1_buffer=L1_dimension,
master_stack = master_stack,
slave_stack = slave_stack,
l2_buffer_size = l2_buffer_size,
fc_frequency = fc_frequency,
cl_frequency = cl_frequency,
MACs=MAC_total,
platform=self.platform,
sdk = sdk,
backend = backend,
dma_parallelization = dma_parallelization)
# create the Makefile for the application
Makefile_writer.print_template_Makefile(weights_files_list, self.platform, sdk, backend)
```
#### File: dory/Templates_writer/L3_templates_writer.py
```python
import math
from mako.template import Template
import re
from collections import OrderedDict
import numpy as np
import sys
import os
from writer_utils import print_test_vector  # assumed to live in the same Templates_writer directory
def print_pool_template_layer_L3(X, W, Y, fs1, fs2, padding, stride,
factor_ch_out,
factor_h_out,
factor_h_in,
name,
out_dim1,
in_dim1,
in_dim_full,
w_out,
h_out,
n_out,
w_in,
h_in,
n_in,
full_net,
platform,
data_type_x,
data_type_y,
test_location,
buffer_l1_all,
input_L3,
backend
):
    # generation of L3 layers. The layers are generated with this infrastructure if L3 tiling is required.
tk = OrderedDict([])
conv_overlap1 = 2 * (fs1 // 2) + fs1 % 2 - 1 - (stride - 1)
conv_overlap2 = 2 * (fs2 // 2) + fs2 % 2 - 1 - (stride - 1)
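    # the overlap simplifies to fs - stride, e.g. a 3-tap kernel with stride 1 re-reads 2 rows between consecutive input tiles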
tk['conv_overlap1'] = conv_overlap1
tk['conv_overlap2'] = conv_overlap2
tk['padding'] = padding
tk['input_L3'] = input_L3
tk['n_tile_W'] = int(factor_ch_out)
tk['n_tile_x'] = int(factor_h_in)
tk['n_tile_y'] = int(factor_h_out)
tk['verbose'] = False
tk['func_name'] = name
tk['func_name_L3'] = name[0] + 'L3'
tk['act_out_dim_partial'] = int(out_dim1)
tk['w_out'] = w_out
tk['h_out'] = h_out
tk['n_out'] = n_out
tk['w_in'] = w_in
tk['h_in'] = h_in
tk['n_in'] = n_in
tk['dim_out'] = out_dim1
tk['dim_in'] = in_dim1
tk['platform'] = platform
tk['y_data_size_byte'] = data_type_y
tk['x_data_size_byte'] = data_type_x
tk['BitIn'] = data_type_x
tk['BitOut'] = data_type_y
tk['weight_dim'] = 0
tk['k_dim'] = 0
tk['lambda_dim'] = 0
root = '/'.join(os.getcwd().split('/')[:-1])
tmpl = Template(filename=root + f"/Templates/{backend}/layer_templates/layer_template_L3.c", strict_undefined=True)
l = ""
s = tmpl.render(verbose_log=l,**tk)
#
save_string = './application/DORY_network/src/' + tk['func_name_L3'] + '.c'
with open(save_string, "w") as f: f.write(s)
tmpl = Template(filename=root + f"/Templates/{backend}/layer_templates/layer_template_L3-h.h")
s = tmpl.render(verbose_log=l, **tk)
if full_net == 1:
save_string = './application/DORY_network/inc/' + \
tk['func_name_L3'] + '.h'
else:
save_string = './applicationL3/DORY_network/inc/' + \
tk['func_name_L3'] + '.h'
with open(save_string, "w") as f:
f.write(s)
if 'partial' in test_location:
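        # standalone layer test: dump the input activations to inputs.hex and generate a main.c from the L3 test template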
string_layer = "inputs.hex"
save_s = './application/DORY_network/' + string_layer
with open(save_s, 'wb') as f:
for i in X.astype('uint8').flatten():
f.write(bytes((i,)))
tk['x_content'] = print_test_vector(X, 'char')
if tk['n_tile_W'] == 1:
tk['W_content'] = print_test_vector(W, 'char')
tk['weight_dim'] = W.shape[0]
tk['check_sum'] = sum(Y)
tk['activation_size_out'] = out_dim1
tk['activation_size_in'] = in_dim1
tk['activation_size_in_full'] = in_dim_full
tk['func_nameL3'] = tk['func_name_L3']
tk['file'] = name[0][5:] + '_weights.hex'
tk['buffer_l1_all'] = buffer_l1_all
tmpl = Template(filename=root + f"/Templates/{backend}/test_templateL3.c")
s = tmpl.render(**tk)
save_string = './application/DORY_network/src/main.c'
with open(save_string, "w") as f: f.write(s)
def print_template_layer_L3(X, W, Y, fs1, fs2, padding, stride,
BitIn, BitW, BitOut,
factor_ch_out,
factor_h_out,
factor_h_in,
name,
out_dim1,
in_dim1,
in_dim_full,
weight_dim1,
lambda_dim,
k_dim,
w_out,
h_out,
n_out,
w_in,
h_in,
n_in,
full_net,
platform,
data_type_x,
data_type_y,
test_location,
out_mul, out_shift,
buffer_l1_all,
input_L3,
backend
):
    # generation of L3 layers. The layers are generated with this infrastructure if L3 tiling is required.
tk = OrderedDict([])
conv_overlap1 = 2 * (fs1 // 2) + fs1 % 2 - 1 - (stride - 1)
conv_overlap2 = 2 * (fs2 // 2) + fs2 % 2 - 1 - (stride - 1)
tk['conv_overlap1'] = conv_overlap1
tk['conv_overlap2'] = conv_overlap2
tk['BitIn'] = BitIn
tk['BitW'] = BitW
tk['BitOut'] = BitOut
tk['padding'] = padding
tk['input_L3'] = input_L3
tk['n_tile_W'] = int(factor_ch_out)
tk['n_tile_x'] = int(factor_h_in)
tk['n_tile_y'] = int(factor_h_out)
tk['verbose'] = False
tk['func_name'] = name
tk['func_name_L3'] = name[0] + 'L3'
tk['act_out_dim_partial'] = int(out_dim1)
tk['weight_dim'] = int(weight_dim1)
tk['lambda_dim'] = lambda_dim
tk['k_dim'] = k_dim
tk['w_out'] = w_out
tk['h_out'] = h_out
tk['n_out'] = n_out
tk['w_in'] = w_in
tk['h_in'] = h_in
tk['n_in'] = n_in
tk['dim_out'] = out_dim1
tk['dim_in'] = in_dim1
tk['platform'] = platform
tk['y_data_size_byte'] = data_type_y
tk['x_data_size_byte'] = data_type_x
root = '/'.join(os.getcwd().split('/')[:-1])
tmpl = Template(filename=root + f"/Templates/{backend}/layer_templates/layer_template_L3.c")
l = ""
s = tmpl.render(verbose_log=l,**tk)
#
save_string = './application/DORY_network/src/' + tk['func_name_L3'] + '.c'
with open(save_string, "w") as f: f.write(s)
tmpl = Template(filename=root + f"/Templates/{backend}/layer_templates/layer_template_L3-h.h")
s = tmpl.render(verbose_log=l, **tk)
if full_net == 1:
save_string = './application/DORY_network/inc/' + \
tk['func_name_L3'] + '.h'
else:
save_string = './applicationL3/DORY_network/inc/' + \
tk['func_name_L3'] + '.h'
with open(save_string, "w") as f:
f.write(s)
if 'partial' in test_location:
string_layer = "inputs.hex"
save_s = './application/DORY_network/' + string_layer
with open(save_s, 'wb') as f:
for i in X.astype('uint8').flatten():
f.write(bytes((i,)))
tk['x_content'] = print_test_vector(X, 'char')
if tk['n_tile_W'] == 1:
tk['W_content'] = print_test_vector(W, 'char')
tk['weight_dim'] = W.shape[0]
tk['check_sum'] = sum(Y)
tk['activation_size_out'] = out_dim1
tk['activation_size_in'] = in_dim1
tk['activation_size_in_full'] = in_dim_full
tk['out_mul'] = out_mul
tk['out_shift'] = out_shift
tk['func_nameL3'] = tk['func_name_L3']
tk['file'] = name[0][5:] + '_weights.hex'
tk['buffer_l1_all'] = buffer_l1_all
tmpl = Template(filename=root + f"/Templates/{backend}/test_templateL3.c")
s = tmpl.render(**tk)
save_string = './application/DORY_network/src/main.c'
with open(save_string, "w") as f: f.write(s)
```
#### File: dory/Templates_writer/writer_utils.py
```python
import math
from mako.template import Template
import re
from collections import OrderedDict
import numpy as np
import sys
import os
def print_file_list(x):
# This function is used to generate a string with all input files.
s = repr(x).replace("[", "").replace("]", "").replace("'", '"')
return s
def print_test_vector(x, type_data):
# Print the test vector in the c file.
if type_data == 'char':
try:
np.set_printoptions(
threshold=sys.maxsize,
                formatter={'int': lambda x: hex(np.uint8(x))},
)
except TypeError:
np.set_printoptions(threshold=sys.maxsize)
s = repr(x.flatten()).replace("array([", "").replace("]", "").replace("[", "").replace(")", "").replace(",\n dtype=int8)", "").replace(", dtype=uint8", "").replace(",\n dtype=uint8)", "").replace(",\n dtype=uint8", "").replace(",\n dtype=int8", "").replace(", dtype=int8", "").replace(", dtype=int8)", "").replace(", dtype=int8)", "").replace(", dtype=uint8)", "")
elif type_data == 'int16_t':
try:
np.set_printoptions(
threshold=sys.maxsize,
formatter={'int': lambda x: hex(np.uint16(x)) if (
x < 0) else hex(np.int16(x)), }
)
except TypeError:
np.set_printoptions(threshold=sys.maxsize)
s = repr(x.flatten()).replace("array([", "").replace("]", "").replace("[", "").replace(",\n dtype=int16)", "").replace(
", dtype=int16)", "").replace(", dtype=int16)", "").replace(", dtype=uint16)", "").replace(")", "")
else:
try:
np.set_printoptions(
threshold=sys.maxsize,
formatter={'int': lambda x: hex(np.uint32(x)) if (
x < 0) else hex(np.int32(x)), }
)
except TypeError:
np.set_printoptions(threshold=sys.maxsize)
s = repr(x.flatten()).replace("array([", "").replace("]", "").replace("[", "").replace(
",\n dtype=int32)", "").replace(", dtype=int32)", "").replace(", dtype=int32)", "").replace(", dtype=uint32)", "")
return s
```
#### File: dory/Tiler/tiler_conv1d.py
```python
import math
import numpy as np
import torch
import torch.nn as nn
# constraint solver for optimization
from ortools.constraint_solver import pywrapcp
from ortools.constraint_solver import solver_parameters_pb2
# template for output
from Layer2D_templates_writer import print_template_layer
from Layer1D_templates_writer import print_template_layer_1D
from L3_templates_writer import print_template_layer_L3
from L3_templates_writer import print_pool_template_layer_L3
import logging
import os
import sys
class Tiler_Conv1D():
# Class to generate the Tiling of the layer.
def __init__(self,tiler):
self.module = tiler.module
self.out_ch = tiler.out_ch
self.filter_size = tiler.filter_size
self.stride = tiler.stride
self.padding = tiler.padding
self.groups = tiler.groups
self.x_shape = tiler.x_shape
self.buffer_size = tiler.buffer_size
self.L2_buffer_size = tiler.L2_buffer_size
self.platform = tiler.platform
self.chip = tiler.chip
self.test_location = tiler.test_location
self.BitIn = tiler.BitIn
self.BitW = tiler.BitW
self.BitOut = tiler.BitOut
self.BitActivation = tiler.BitActivation
self.optional_type = tiler.optional_type
self.sdk = tiler.sdk
self.backend = tiler.backend
self.dma_parallelization = tiler.dma_parallelization
self.number_of_clusters = tiler.number_of_clusters
def get_tiling(self, X, Y, W,
relu,
BN,
dilation,
has_bias,
out_mul, out_shift,
type_data='char',
full_computation=False,
multiple_buffering_factor=2,
name='conv',
forcing ='None'
):
        # This function generates the layer code to be included in the project for the 1D convolution operations.
ds_x = self.BitIn
ds_y = self.BitOut
ds_W = self.BitW
fs1 = self.filter_size[1]
p_left = self.padding[1]
p_right = self.padding[3]
n_in = self.x_shape[0]
n_out = self.out_ch
name_include = []
# L3 tiling
h_in = self.x_shape[-2]
w_in = self.x_shape[-1]
h_out = 1
if dilation > 1:
w_out = int(np.floor((w_in - ((fs1 - 1)*dilation) + p_left + p_right + (self.stride - 1)) / self.stride))
else:
w_out = int(np.floor((w_in - (fs1 - 1) + p_left + p_right + (self.stride - 1)) / self.stride))
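        # standard convolution output length, e.g. w_in = 100, fs1 = 3, dilation = 1, no padding, stride = 1 -> w_out = 98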
if p_left==0 and p_right==0 and dilation ==1:
tiling_MAC_cycle_nodilation = self.get_tiling_conv_1D_nodilation(
fs1,
0,
self.stride,
dilation,
n_in,
n_out,
w_in,
w_out,
BN,
buffer_size=self.buffer_size
)
tiling_MAC_cycle_normal = self.get_tiling_conv_1D_normal(fs1,
self.padding[1],
self.stride,
dilation,
n_in,
n_out,
w_in,
w_out,
BN,
buffer_size=self.buffer_size
)
tiling_MAC_cycle_indirect = self.get_tiling_conv_1D_indirect(fs1,
self.padding[1],
self.stride,
dilation,
n_in,
n_out,
w_in,
w_out,
BN,
buffer_size=self.buffer_size
)
if p_left==0 and p_right==0 and dilation ==1:
_, _, _, _, MAC_cycle_nodilation, _ = tiling_MAC_cycle_nodilation
else:
MAC_cycle_nodilation = 0
_, _, _, _, MAC_cycle_normal, _ = tiling_MAC_cycle_normal
_, _, _, _, MAC_cycle_indirect, _ = tiling_MAC_cycle_indirect
max_MAC = MAC_cycle_nodilation
layer_type = 'nodilation'
if p_left==0 and p_right==0 and dilation ==1:
tiling = tiling_MAC_cycle_nodilation
if MAC_cycle_normal > max_MAC:
max_MAC = MAC_cycle_normal
layer_type = 'normal'
tiling = tiling_MAC_cycle_normal
if MAC_cycle_indirect > max_MAC:
max_MAC = MAC_cycle_indirect
layer_type = 'indirect'
tiling = tiling_MAC_cycle_indirect
### FOR TEST
if forcing == 'normal':
max_MAC = MAC_cycle_normal
layer_type = 'normal'
tiling = tiling_MAC_cycle_normal
elif forcing == 'indirect':
max_MAC = MAC_cycle_indirect
layer_type = 'indirect'
tiling = tiling_MAC_cycle_indirect
elif forcing == 'nodilation':
max_MAC = MAC_cycle_nodilation
layer_type = 'nodilation'
tiling = tiling_MAC_cycle_nodilation
if tiling is not None:
tile_n_in, tile_n_out, tile_w_in, tile_w_out, MAC_cycle, memory = tiling
x_tot_str = '[%dx%d]' % (n_in, w_in)
y_tot_str = '[%dx%d]' % (n_out, w_out)
W_tot_str = '[%dx%dx%d]' % (n_out, n_in, fs1)
x_tot_size_str = "%.2f KiB" % (1. / 1024. / 8. * (ds_x * n_in * w_in )) if ds_x * \
                n_in * w_in > 1024 else '%d B' % (ds_x * n_in * w_in * 1 / 8.)
y_tot_size_str = '%.2f KiB' % (1. / 1024. / 8. * (ds_y * n_out * w_out )) if ds_y * \
n_out * h_out * w_out > 1024 else '%d B' % (ds_y * n_out * w_out * 1 / 8.)
W_tot_size_str = '%.2f KiB' % (1. / 1024. / 8. * (ds_W * n_out * n_in * fs1)) if ds_W * \
n_out * n_in * fs1 > 1024 else '%d B' % (ds_W * n_out * n_in * fs1 * 1 / 8.)
x_tile_str = '[%dx%d]' % (tile_n_in, tile_w_in)
y_tile_str = '[%dx%d]' % (tile_n_out, tile_w_out)
W_tile_str = '[%dx%dx%d]' % (tile_n_out, tile_n_in, fs1)
x_size_str = "%.2f KiB" % (1. / 1024. / 8. * (ds_x * tile_n_in * tile_w_in )) if ds_x * tile_n_in * tile_w_in > 1024 else '%d B' % (ds_x * tile_n_in * tile_w_in * 1 / 8.)
y_size_str = '%.2f KiB' % (1. / 1024. / 8. * (ds_y * tile_n_out * tile_w_out)) if ds_y * tile_n_out * tile_w_out > 1024 else '%d B' % (ds_y * tile_n_out * tile_w_out * 1 / 8.)
y_no_str = '%d' % (max(math.ceil((n_out) / (tile_n_out)), 1) * max(math.ceil((w_out) / (tile_w_out)), 1))
W_size_str = '%.2f KiB' % (1. / 1024. / 8. * (ds_W * tile_n_out * tile_n_in * fs1)) if (ds_W * tile_n_out * tile_n_in * fs1) > 1024 else '%d B' % (ds_W * tile_n_out * tile_n_in * fs1 * 1 / 8.)
W_no_str = '%d' % (max(math.ceil((n_out - tile_n_out) / (tile_n_out) + 1), 1) * 1)
x_no_str = '%d' % (int(int(y_no_str)/int(W_no_str)) * pow(max(math.ceil((n_in - tile_n_in) / (tile_n_in) + 1), 1),2))
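            # L1 footprint estimate: a buffer is counted twice when it is split into more than one tile
            # (presumably for double buffering); the trailing n_out term covers the per-channel normalization parameters.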
L1_tiles_size = ds_x * tile_n_in * tile_w_in / 8. * (1 + int(int(x_no_str) > 1)) + ds_y * tile_n_out * tile_w_out / 8. * (1 + int(int(y_no_str) > 1)) + n_out * 8 * 2
L1_tiles_size += (ds_W * tile_n_out * tile_n_in * fs1 / 8.) * (1 + int(int(W_no_str) > 1))
logging.debug(" L2 size:".ljust(18) + "x: " + x_tot_str.ljust(15) +"y: " + y_tot_str.ljust(15) + "W: " + W_tot_str.ljust(15))
logging.debug(" L2 buff:".ljust(18) + "x: " + x_tot_size_str.ljust(15) +"y: " + y_tot_size_str.ljust(15) + "W: " + W_tot_size_str.ljust(15))
logging.debug(" tiles L2-L1:".ljust(18) + "x: " + x_tile_str.ljust(15) +"y: " + y_tile_str.ljust(15) + "W: " + W_tile_str.ljust(15))
logging.debug(" L1 buff:".ljust(18) + "x: " + x_size_str.ljust(15) +"y: " + y_size_str.ljust(15) + "W: " + W_size_str.ljust(15))
logging.debug(" no. tiles:".ljust(18) + "x: " + x_no_str.ljust(15) +"y: " + y_no_str.ljust(15) + "W: " + W_no_str.ljust(15))
logging.debug(" Total L1 occupation:".ljust(18) + str(memory * 1.).ljust(15))
print_template_layer_1D(X, Y, W,
n_in, w_in,
n_out, w_out,
tile_n_in, tile_w_in, tile_w_out,
tile_n_out,
ds_x, ds_y, ds_W, self.BitActivation, type_data,
fs1, p_left, p_right, self.stride,
dilation,
relu, BN,
out_mul, out_shift,
name_layer=name,
test=False,
test_location=self.test_location,
has_bias=has_bias,
conv_order='PULP-NN',
optional='conv',
l1_buffer=self.buffer_size,
platform=self.platform,
chip=self.chip,
optional_type=self.optional_type,
backend = self.backend,
layer_type = layer_type)
### L2 memory calculation
n_out_temp = self.out_ch
w_in_temp = self.x_shape[-1]
#h_out_temp = int(np.floor((h_in_temp - (fs1 - 1) + p_left + p_right + (self.stride - 1)) / self.stride))
w_out_temp = int(np.floor((w_in_temp - ((fs1 - 1)*dilation) + p_left + p_right + (self.stride - 1)) / self.stride))
out_dim1 = n_out_temp * w_out_temp
n_in_temp = self.x_shape[0]
w_in_temp = self.x_shape[-1]
in_dim1 = n_in_temp * w_in_temp
n_in_temp = self.x_shape[0]
n_out_temp = self.out_ch
weights_dim = n_in_temp * n_out_temp * fs1
if BN == 1:
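                # BitActivation / 4 bytes per output channel: two batch-norm parameters (k and lambda) of BitActivation bits each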
weights_dim +=n_out_temp * int(self.BitActivation / 4)
return in_dim1, out_dim1, weights_dim, L1_tiles_size, 0, 1, 1, 1
return None
##############################################
############ CONV 1D TILING ##################
##############################################
def get_tiling_conv_1D_normal(self, fs,
p,
stride,
dilation,
in_channels,
out_channels,
x_shape,
y_shape,
BN,
buffer_size=44000
):
#### LIMITATION: PADDING ONLY IN FIRST TILE
s = stride
n_in = in_channels
n_out = out_channels
h_in = x_shape
# p = 0
h_out = y_shape
max_tile_n_out = n_out
max_tile_n_in = n_in
min_tile_h_in = fs
min_tile_h_out = 1
# this is to renormalize all costs
max_obj_value = sys.maxsize
# constraints
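        # all sizes below are expressed in bits (8 bits per char element), hence the comparison against buffer_size * 8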
input_dim = 8 * n_in * h_in
output_dim = 8 * n_out * h_out
weight_dim = 8 * n_in * n_out * fs
im2col_dim = 8 * 2 * 8 * fs * n_in
bn_dim = self.BitActivation * n_out * 2
buffer_total = input_dim + output_dim + weight_dim + im2col_dim + bn_dim
if BN == 0:
buffer_total -= bn_dim
if buffer_total <= buffer_size * 8:
            ### BAD BAD BAD
obj_expr = 58+(h_out/2)*(119+(n_out/4)*((14*n_in*fs/4)+159))
print(f'tile in: {n_in} x {h_in}, tile out: {n_out} x {h_out}' )
return (n_in, n_out, h_in, h_out, obj_expr, buffer_total/8)
else:
db = 2
parameters = pywrapcp.Solver.DefaultSolverParameters()
solver = pywrapcp.Solver("simple_CP", parameters)
            # optimized channel-first for the HWC kernel
            tile_n_in = solver.IntVar(1, max_tile_n_in, 'tile_n_in')
tile_n_out = solver.IntVar(1, max_tile_n_out, 'tile_n_out')
tile_h_in = solver.IntVar(min_tile_h_in, h_in, 'tile_h_in')
tile_h_out = solver.IntVar(min_tile_h_out, h_out, 'tile_h_out')
zero_variable = solver.IntVar(0, 0, 'zero variable')
h_out_intvar = solver.IntVar(min_tile_h_out,h_out,'h_out_intvar')
solver.Add(h_out_intvar == h_out)
if ((fs - 1)*dilation+1)*2 <= h_in:
solver.Add(0 == (tile_h_in - ((fs - 1)*dilation+1)) % s)
solver.Add(tile_h_out * s == (tile_h_in - (fs - 1)*dilation + (s - 1)))
# padding added
solver.Add(solver.Max((h_in - tile_h_in - (tile_h_in - (fs - 1)*dilation - p)), 0) % (tile_h_in - (fs - 1)*dilation + 1) + abs(solver.Min(solver.Max((h_in - tile_h_in - (tile_h_in - (fs - 1)*dilation - p)), 0) % (tile_h_in - (fs - 1)*dilation), 1) - 1) * ((fs - 1)*dilation+1) >= ((fs - 1)*dilation+1))
#TO MAKE SURE TILING doesn't fail for dilation
solver.Add(h_in >= s*(tile_h_out*(h_out_intvar//tile_h_out)-1)-p+dilation*(fs-1)+1)
else:
solver.Add(h_in == tile_h_in )
solver.Add(h_out == tile_h_out )
solver.Add(tile_n_in == n_in)
constr_in = db * 8 * tile_n_in * tile_h_in #* tile_w_in
constr_out = db * 8 * tile_n_out * tile_h_out #* tile_w_out
constr_weight = db * 8 * tile_n_in * tile_n_out * fs
constr_im2col = 2 * 8 * 8 * fs * tile_n_in
constr_bn = self.BitActivation * n_out * 2
            constraint_all = constr_in + constr_out + constr_weight + constr_bn + constr_im2col + 20*32  # 20*32 bits: twenty 4-byte paddings added between the buffers
if BN == 0:
constraint_all -= constr_bn
solver.Add(constraint_all <= buffer_size * 8)
# objective
obj_expr = solver.IntVar(0, max_obj_value, "obj_expr")
n_tiles_h = solver.IntVar(1, h_in, "n_tiles_h")
leftover_tile_h_out = solver.IntVar(0, h_out, "leftover_tile_h_out")
n_tiles_nout = solver.IntVar(1, n_out, "n_tiles_nout")
leftover_tile_nout = solver.IntVar(0, n_out, "leftover_tile_nout")
solver.Add(n_tiles_h == (h_out + tile_h_out - 1) // tile_h_out)
solver.Add(leftover_tile_h_out == (h_out + zero_variable) % tile_h_out)
solver.Add(n_tiles_nout == (n_out + tile_n_out - 1) // tile_n_out)
solver.Add(leftover_tile_nout == (n_out + zero_variable) % tile_n_out)
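            # Heuristic objective maximized by the solver: the (x - 1) % k terms are largest when x is a multiple of k,
            # steering tile_h_out towards multiples of 16 (8 cores x 2 output pixels) and tile_n_out towards multiples
            # of 4 (output-channel unrolling), as noted in the indirect-kernel variant below.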
solver.Add(obj_expr == (64 * 10000 * tile_n_out
+ constraint_all
+ 64 * 1000000 * ((tile_h_out - 1) % 16)
+ 64 * 1000000 * ((tile_n_out - 1) % 4)
+ 64 * 10000 * (leftover_tile_nout)
+ 64 * 10000 * (leftover_tile_nout % 4)
+ 64 * 10000 * (leftover_tile_h_out % 16)))
objective = solver.Maximize(obj_expr, 1)
decision_builder = solver.Phase([tile_n_in, tile_n_out, tile_h_in, tile_h_out, n_tiles_h, leftover_tile_h_out, n_tiles_nout, leftover_tile_nout],
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE)
# Create a solution collector.
collector = solver.LastSolutionCollector()
# Add the decision variables.
collector.Add(tile_n_in)
collector.Add(tile_n_out)
collector.Add(tile_h_in)
collector.Add(tile_h_out)
collector.Add(n_tiles_h)
collector.Add(leftover_tile_h_out)
collector.Add(n_tiles_nout)
collector.Add(leftover_tile_nout)
collector.Add(obj_expr)
collector.Add(constraint_all)
# Add the objective.
collector.AddObjective(obj_expr)
solver.Solve(decision_builder, [objective, collector])
if collector.SolutionCount() > 0:
best_solution = collector.SolutionCount() - 1
tile_n_in = collector.Value(best_solution, tile_n_in)
tile_n_out = collector.Value(best_solution, tile_n_out)
tile_h_in = collector.Value(best_solution, tile_h_in)
tile_h_out = collector.Value(best_solution, tile_h_out)
n_tiles_h = collector.Value(best_solution, n_tiles_h)
leftover_tile_h_out = collector.Value(best_solution, leftover_tile_h_out)
n_tiles_nout = collector.Value(best_solution, n_tiles_nout)
leftover_tile_nout = collector.Value(best_solution, leftover_tile_nout)
obj_expr = collector.Value(best_solution, obj_expr)
memory = collector.Value(best_solution, constraint_all)
if tile_h_in >= h_in:
tile_h_in = h_in
tile_h_out = int((tile_h_in -(fs - 1)*dilation + (2*p) + (s - 1))/s)
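                # analytical cycle model: constr1..constr4 estimate MAC/cycle (scaled by 1000) for the main tile and for
                # the height / channel / combined leftover tiles; the result is averaged over how many tiles of each kind run.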
MAC = tile_n_out*tile_n_in*tile_h_out*fs
cycles_im2col = (61 + 32 * fs) * ((32 * fs) > ((fs * tile_n_in * 2) // 8) ) + (29 + (fs * tile_n_in * 2) // 8) * ((32 * fs) < ((fs * tile_n_in * 2) // 8) )
cycles_im2col_leftover = (38 + 16 * fs) * ((16 * fs) > ((fs * tile_n_in) // 8) ) + (29 + (fs * tile_n_in) // 8) * ((16 * fs) < ((fs * tile_n_in) // 8) )
n_4_2 = ((tile_h_out // 8 + ((tile_h_out % 8) > 0)) // 2)
n_4_1 = ((tile_h_out // 8 + ((tile_h_out % 8) > 0)) % 2)
cycles_chout_4_2 = (tile_n_out // 4) * (159 + 14 * ((tile_n_in*fs) // 4) + 18 * ((tile_n_in*fs) % 4))
cycles_leftover_chout_4_2 = (tile_n_out % 4) * (52 + ((tile_n_in*fs) // 4) * 5 + 8 * ((tile_n_in*fs) % 4))
cycles_chout_4_1 = (tile_n_out // 4) * (82 + ((tile_n_in*fs) //4) * 9 + 12 * ((tile_n_in*fs) % 4))
cycles_leftover_chout_4_1 = (tile_n_out % 4) * (28 + 3 * ((tile_n_in*fs) // 4) + 5 * ((tile_n_in*fs) % 4))
instr = (100 + n_4_2 * (27 + cycles_im2col + (127 + cycles_chout_4_2 + cycles_leftover_chout_4_2)) + n_4_1 * (20 + cycles_im2col_leftover + (94 + cycles_chout_4_1 + cycles_leftover_chout_4_1)))
constr1 = (MAC * 1000) // instr
## left over h
MAC = leftover_tile_h_out*tile_n_in*tile_n_out*fs
cycles_im2col = (61 + 32 * fs) * ((32 * fs) > ((fs * tile_n_in * 2) // 8) ) + (29 + (fs * tile_n_in * 2) // 8) * ((32 * fs) < ((fs * tile_n_in * 2) // 8) )
cycles_im2col_leftover = (38 + 16 * fs) * ((16 * fs) > ((fs * tile_n_in) // 8) ) + (29 + (fs * tile_n_in) // 8) * ((16 * fs) < ((fs * tile_n_in) // 8) )
n_4_2 = ((leftover_tile_h_out // 8 + ((leftover_tile_h_out % 8) > 0)) // 2)
n_4_1 = ((leftover_tile_h_out // 8 + ((leftover_tile_h_out % 8) > 0)) % 2)
cycles_chout_4_2 = (tile_n_out // 4) * (159 + 14 * ((tile_n_in*fs) // 4) + 18 * ((tile_n_in*fs) % 4))
cycles_leftover_chout_4_2 = (tile_n_out % 4) * (52 + ((tile_n_in*fs) // 4) * 5 + 8 * ((tile_n_in*fs) % 4))
cycles_chout_4_1 = (tile_n_out // 4) * (82 + ((tile_n_in*fs) //4) * 9 + 12 * ((tile_n_in*fs) % 4))
cycles_leftover_chout_4_1 = (tile_n_out % 4) * (28 + 3 * ((tile_n_in*fs) // 4) + 5 * ((tile_n_in*fs) % 4))
instr = (100 + n_4_2 * (27 + cycles_im2col + (127 + cycles_chout_4_2 + cycles_leftover_chout_4_2)) + n_4_1 * (20 + cycles_im2col_leftover + (94 + cycles_chout_4_1 + cycles_leftover_chout_4_1)))
constr2 = (MAC * 1000) // instr
## left over n
MAC = leftover_tile_nout*tile_n_in*tile_h_out*fs
cycles_im2col = (61 + 32 * fs) * ((32 * fs) > ((fs * tile_n_in * 2) // 8) ) + (29 + (fs * tile_n_in * 2) // 8) * ((32 * fs) < ((fs * tile_n_in * 2) // 8) )
cycles_im2col_leftover = (38 + 16 * fs) * ((16 * fs) > ((fs * tile_n_in) // 8) ) + (29 + (fs * tile_n_in) // 8) * ((16 * fs) < ((fs * tile_n_in) // 8) )
n_4_2 = ((tile_h_out // 8 + ((tile_h_out % 8) > 0)) // 2)
n_4_1 = ((tile_h_out // 8 + ((tile_h_out % 8) > 0)) % 2)
cycles_chout_4_2 = (leftover_tile_nout // 4) * (159 + 14 * ((tile_n_in*fs) // 4) + 18 * ((tile_n_in*fs) % 4))
cycles_leftover_chout_4_2 = (leftover_tile_nout % 4) * (52 + ((tile_n_in*fs) // 4) * 5 + 8 * ((tile_n_in*fs) % 4))
cycles_chout_4_1 = (leftover_tile_nout // 4) * (82 + ((tile_n_in*fs) //4) * 9 + 12 * ((tile_n_in*fs) % 4))
cycles_leftover_chout_4_1 = (leftover_tile_nout % 4) * (28 + 3 * ((tile_n_in*fs) // 4) + 5 * ((tile_n_in*fs) % 4))
instr = (100 + n_4_2 * (27 + cycles_im2col + (127 + cycles_chout_4_2 + cycles_leftover_chout_4_2)) + n_4_1 * (20 + cycles_im2col_leftover + (94 + cycles_chout_4_1 + cycles_leftover_chout_4_1)))
constr3 = (MAC * 1000) // instr
## left over all
MAC = leftover_tile_nout*tile_n_in*leftover_tile_h_out*fs
cycles_im2col = (61 + 32 * fs) * ((32 * fs) > ((fs * tile_n_in * 2) // 8) ) + (29 + (fs * tile_n_in * 2) // 8) * ((32 * fs) < ((fs * tile_n_in * 2) // 8) )
cycles_im2col_leftover = (38 + 16 * fs) * ((16 * fs) > ((fs * tile_n_in) // 8) ) + (29 + (fs * tile_n_in) // 8) * ((16 * fs) < ((fs * tile_n_in) // 8) )
n_4_2 = ((leftover_tile_h_out // 8 + ((leftover_tile_h_out % 8) > 0)) // 2)
n_4_1 = ((leftover_tile_h_out // 8 + ((leftover_tile_h_out % 8) > 0)) % 2)
cycles_chout_4_2 = (leftover_tile_nout // 4) * (159 + 14 * ((tile_n_in*fs) // 4) + 18 * ((tile_n_in*fs) % 4))
cycles_leftover_chout_4_2 = (leftover_tile_nout % 4) * (52 + ((tile_n_in*fs) // 4) * 5 + 8 * ((tile_n_in*fs) % 4))
cycles_chout_4_1 = (leftover_tile_nout // 4) * (82 + ((tile_n_in*fs) //4) * 9 + 12 * ((tile_n_in*fs) % 4))
cycles_leftover_chout_4_1 = (leftover_tile_nout % 4) * (28 + 3 * ((tile_n_in*fs) // 4) + 5 * ((tile_n_in*fs) % 4))
instr = (100 + n_4_2 * (27 + cycles_im2col + (127 + cycles_chout_4_2 + cycles_leftover_chout_4_2)) + n_4_1 * (20 + cycles_im2col_leftover + (94 + cycles_chout_4_1 + cycles_leftover_chout_4_1)))
constr4 = (MAC * 1000) // instr
constr = (constr1*(n_tiles_h-(leftover_tile_h_out>0))*(n_tiles_nout-(leftover_tile_nout>0)) + constr2*(n_tiles_nout-(leftover_tile_nout>0))*(leftover_tile_h_out>0) + constr3*(n_tiles_h-(leftover_tile_h_out>0))*(leftover_tile_nout>0) + constr4)//((n_tiles_h-(leftover_tile_h_out>0))*(n_tiles_nout-(leftover_tile_nout>0)) + (n_tiles_nout-(leftover_tile_nout>0))*(leftover_tile_h_out>0) + (n_tiles_h-(leftover_tile_h_out>0))*(leftover_tile_nout>0) + 1*(leftover_tile_nout>0)*(leftover_tile_h_out>0))
print(f'Conv normal MAC/cycle simulated: {(constr/1000)}')
return (tile_n_in, tile_n_out, tile_h_in, tile_h_out, (constr/1000), memory/8)
return None
def get_tiling_conv_1D_nodilation(self,
fs,
p,
stride,
dilation,
in_channels,
out_channels,
x_shape,
y_shape,
BN,
buffer_size=44000
):
s = stride
n_in = in_channels
n_out = out_channels
h_in = x_shape
# p = 0
h_out = y_shape
max_tile_n_out = n_out
max_tile_n_in = n_in
min_tile_h_in = fs
min_tile_h_out = 1
# this is to renormalize all costs
max_obj_value = 10000000000000
# constraints
input_dim = 8 * n_in * h_in
output_dim = 8 * n_out * h_out
weight_dim = 8 * n_in * n_out * fs
bn_dim = self.BitActivation * n_out * 2
buffer_total = input_dim + output_dim + weight_dim + bn_dim
if BN == 0:
buffer_total -= bn_dim
if buffer_total <= buffer_size * 8:
obj_expr = 58+(h_out/2)*(119+(n_out/4)*((14*n_in*fs/4)+159))
print(f'tile in: {n_in} x {h_in}, tile out: {n_out} x {h_out}' )
return (n_in, n_out, h_in, h_out, obj_expr, buffer_total/8)
else:
db = 2
parameters = pywrapcp.Solver.DefaultSolverParameters()
solver = pywrapcp.Solver("simple_CP", parameters)
            # optimized channel-first for the HWC kernel
            tile_n_in = solver.IntVar(1, max_tile_n_in, 'tile_n_in')
tile_n_out = solver.IntVar(1, max_tile_n_out, 'tile_n_out')
tile_h_in = solver.IntVar(min_tile_h_in, h_in, 'tile_h_in')
tile_h_out = solver.IntVar(min_tile_h_out, h_out, 'tile_h_out')
zero_variable = solver.IntVar(0, 0, 'zero variable')
solver.Add(0 == (tile_h_in - ((fs - 1)*dilation+1)) % s)
solver.Add(tile_h_out * s == (tile_h_in - (fs - 1)*dilation + (s - 1)))
solver.Add(tile_n_in == n_in)
# padding added
solver.Add(solver.Max((h_in - tile_h_in - (tile_h_in - fs + 1 - p)), 0) % (tile_h_in - fs + 1) + abs(solver.Min(
solver.Max((h_in - tile_h_in - (tile_h_in - fs + 1 - p)), 0) % (tile_h_in - fs + 1), 1) - 1) * fs >= fs)
constr_in = db * 8 * tile_n_in * tile_h_in #* tile_w_in
constr_out = db * 8 * tile_n_out * tile_h_out #* tile_w_out
constr_weight = db * 8 * tile_n_in * tile_n_out * fs
constr_bn = self.BitActivation * n_out * 2
            constraint_all = constr_in + constr_out + constr_weight + constr_bn + 20*32  # 20*32 bits: twenty 4-byte paddings added between the buffers
if BN == 0:
constraint_all -= constr_bn
solver.Add(constraint_all <= buffer_size * 8)
# objective
obj_expr = solver.IntVar(0, max_obj_value, "obj_expr")
n_tiles_h = solver.IntVar(1, h_in, "n_tiles_h")
leftover_tile_h_out = solver.IntVar(0, h_out, "leftover_tile_h_out")
n_tiles_nout = solver.IntVar(1, n_out, "n_tiles_nout")
leftover_tile_nout = solver.IntVar(0, n_out, "leftover_tile_nout")
solver.Add(n_tiles_h == (h_out + tile_h_out - 1) // tile_h_out)
solver.Add(leftover_tile_h_out == (h_out + zero_variable) % tile_h_out)
solver.Add(n_tiles_nout == (n_out + tile_n_out - 1) // tile_n_out)
solver.Add(leftover_tile_nout == (n_out + zero_variable) % tile_n_out)
## principal kernel
solver.Add(obj_expr == (64 * 10000 * tile_n_out
+ constraint_all
+ 64 * 1000000 * ((tile_h_out - 1) % 16)
+ 64 * 1000000 * ((tile_n_out - 1) % 4)
+ 64 * 10000 * (leftover_tile_nout)
+ 64 * 10000 * (leftover_tile_nout % 4)
+ 64 * 10000 * (leftover_tile_h_out % 16)))
objective = solver.Maximize(obj_expr, 1)
decision_builder = solver.Phase([tile_n_in, tile_n_out, tile_h_in, tile_h_out, n_tiles_h, leftover_tile_h_out, n_tiles_nout, leftover_tile_nout],
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE)
# Create a solution collector.
collector = solver.LastSolutionCollector()
# Add the decision variables.
collector.Add(tile_n_in)
collector.Add(tile_n_out)
collector.Add(tile_h_in)
collector.Add(tile_h_out)
collector.Add(n_tiles_h)
collector.Add(leftover_tile_h_out)
collector.Add(n_tiles_nout)
collector.Add(leftover_tile_nout)
collector.Add(obj_expr)
collector.Add(constraint_all)
# Add the objective.
collector.AddObjective(obj_expr)
solver.Solve(decision_builder, [objective, collector])
if collector.SolutionCount() > 0:
best_solution = collector.SolutionCount() - 1
tile_n_in = collector.Value(best_solution, tile_n_in)
tile_n_out = collector.Value(best_solution, tile_n_out)
tile_h_in = collector.Value(best_solution, tile_h_in)
tile_h_out = collector.Value(best_solution, tile_h_out)
n_tiles_h = collector.Value(best_solution, n_tiles_h)
leftover_tile_h_out = collector.Value(best_solution, leftover_tile_h_out)
n_tiles_nout = collector.Value(best_solution, n_tiles_nout)
leftover_tile_nout = collector.Value(best_solution, leftover_tile_nout)
obj_expr = collector.Value(best_solution, obj_expr)
memory = collector.Value(best_solution, constraint_all)
if tile_h_in >= h_in:
tile_h_in = h_in
tile_h_out = int((tile_h_in -(fs - 1)*dilation + (2*p) + (s - 1))/s)
MAC = tile_n_out*tile_n_in*tile_h_out*fs
n_4_2 = ((tile_h_out // 8 + ((tile_h_out % 8) > 0)) // 2)
n_4_1 = ((tile_h_out // 8 + ((tile_h_out % 8) > 0)) % 2)
cycles_chout_4_2 = (tile_n_out // 4) * (159 + 14 * ((tile_n_in*fs) // 4) + 18 * ((tile_n_in*fs) % 4))
cycles_leftover_chout_4_2 = (tile_n_out % 4) * (52 + ((tile_n_in*fs) // 4) * 5 + 8 * ((tile_n_in*fs) % 4))
cycles_chout_4_1 = (tile_n_out // 4) * (82 + ((tile_n_in*fs) //4) * 9 + 12 * ((tile_n_in*fs) % 4))
cycles_leftover_chout_4_1 = (tile_n_out % 4) * (28 + 3 * ((tile_n_in*fs) // 4) + 5 * ((tile_n_in*fs) % 4))
instr = 62 + n_4_2 * (27 + (135 + cycles_chout_4_2 + cycles_leftover_chout_4_2)) + n_4_1 * (21 + (94 + cycles_chout_4_1 + cycles_leftover_chout_4_1))
constr1 = ( MAC * 1000 ) // instr
## left over h
MAC = tile_n_out*tile_n_in*leftover_tile_h_out*fs
n_4_2 = ((leftover_tile_h_out // 8 + ((leftover_tile_h_out % 8) > 0)) // 2)
n_4_1 = ((leftover_tile_h_out // 8 + ((leftover_tile_h_out % 8) > 0)) % 2)
cycles_chout_4_2 = (tile_n_out // 4) * (159 + 14 * ((tile_n_in*fs) // 4) + 18 * ((tile_n_in*fs) % 4))
cycles_leftover_chout_4_2 = (tile_n_out % 4) * (52 + ((tile_n_in*fs) // 4) * 5 + 8 * ((tile_n_in*fs) % 4))
cycles_chout_4_1 = (tile_n_out // 4) * (82 + ((tile_n_in*fs) //4) * 9 + 12 * ((tile_n_in*fs) % 4))
cycles_leftover_chout_4_1 = (tile_n_out % 4) * (28 + 3 * ((tile_n_in*fs) // 4) + 5 * ((tile_n_in*fs) % 4))
instr = 62 + n_4_2 * (27 + (135 + cycles_chout_4_2 + cycles_leftover_chout_4_2)) + n_4_1 * (21 + (94 + cycles_chout_4_1 + cycles_leftover_chout_4_1))
constr2 = ( MAC * 1000 ) // instr
## left over n
MAC = leftover_tile_nout*tile_n_in*tile_h_out*fs
n_4_2 = ((tile_h_out // 8 + ((tile_h_out % 8) > 0)) // 2)
n_4_1 = ((tile_h_out // 8 + ((tile_h_out % 8) > 0)) % 2)
cycles_chout_4_2 = (leftover_tile_nout // 4) * (159 + 14 * ((tile_n_in*fs) // 4) + 18 * ((tile_n_in*fs) % 4))
cycles_leftover_chout_4_2 = (leftover_tile_nout % 4) * (52 + ((tile_n_in*fs) // 4) * 5 + 8 * ((tile_n_in*fs) % 4))
cycles_chout_4_1 = (leftover_tile_nout // 4) * (82 + ((tile_n_in*fs) //4) * 9 + 12 * ((tile_n_in*fs) % 4))
cycles_leftover_chout_4_1 = (leftover_tile_nout % 4) * (28 + 3 * ((tile_n_in*fs) // 4) + 5 * ((tile_n_in*fs) % 4))
instr = 62 + n_4_2 * (27 + (135 + cycles_chout_4_2 + cycles_leftover_chout_4_2)) + n_4_1 * (21 + (94 + cycles_chout_4_1 + cycles_leftover_chout_4_1))
constr3 = ( MAC * 1000 ) // instr
## left over all
MAC = leftover_tile_nout*tile_n_in*leftover_tile_h_out*fs
n_4_2 = ((leftover_tile_h_out // 8 + ((leftover_tile_h_out % 8) > 0)) // 2)
n_4_1 = ((leftover_tile_h_out // 8 + ((leftover_tile_h_out % 8) > 0)) % 2)
cycles_chout_4_2 = (leftover_tile_nout // 4) * (159 + 14 * ((tile_n_in*fs) // 4) + 18 * ((tile_n_in*fs) % 4))
cycles_leftover_chout_4_2 = (leftover_tile_nout % 4) * (52 + ((tile_n_in*fs) // 4) * 5 + 8 * ((tile_n_in*fs) % 4))
cycles_chout_4_1 = (leftover_tile_nout // 4) * (82 + ((tile_n_in*fs) //4) * 9 + 12 * ((tile_n_in*fs) % 4))
cycles_leftover_chout_4_1 = (leftover_tile_nout % 4) * (28 + 3 * ((tile_n_in*fs) // 4) + 5 * ((tile_n_in*fs) % 4))
instr = 62 + n_4_2 * (27 + (135 + cycles_chout_4_2 + cycles_leftover_chout_4_2)) + n_4_1 * (21 + (94 + cycles_chout_4_1 + cycles_leftover_chout_4_1))
constr4 = ( MAC * 1000 ) // instr
constr = (constr1*(n_tiles_h-(leftover_tile_h_out>0))*(n_tiles_nout-(leftover_tile_nout>0)) + constr2*(n_tiles_nout-(leftover_tile_nout>0))*(leftover_tile_h_out>0) + constr3*(n_tiles_h-(leftover_tile_h_out>0))*(leftover_tile_nout>0) + constr4)//((n_tiles_h-(leftover_tile_h_out>0))*(n_tiles_nout-(leftover_tile_nout>0)) + (n_tiles_nout-(leftover_tile_nout>0))*(leftover_tile_h_out>0) + (n_tiles_h-(leftover_tile_h_out>0))*(leftover_tile_nout>0) + 1*(leftover_tile_nout>0)*(leftover_tile_h_out>0))
print(f'Conv no dilation MAC/cycle simulated: {(constr/1000)}')
return (tile_n_in, tile_n_out, tile_h_in, tile_h_out, (constr/1000), memory/8)
return None
def get_tiling_conv_1D_indirect(self, fs,
p,
stride,
dilation,
in_channels,
out_channels,
x_shape,
y_shape,
BN,
buffer_size=44000
):
s = stride
n_in = in_channels
n_out = out_channels
h_in = x_shape
# p = 0
h_out = y_shape
max_tile_n_out = n_out
max_tile_n_in = n_in
min_tile_h_in = fs
min_tile_h_out = 1
# this is to renormalize all costs
max_obj_value = sys.maxsize
# constraints
input_dim = 8 * n_in * h_in
output_dim = 8 * n_out * h_out
weight_dim = 8 * n_in * n_out * fs
bn_dim = self.BitActivation * n_out * 2
buffer_total = input_dim + output_dim + weight_dim + bn_dim
if BN == 0:
buffer_total -= bn_dim
if buffer_total <= buffer_size * 8:
obj_expr = 58+(h_out/2)*(119+(n_out/4)*((14*n_in*fs/4)+159))
print(f'tile in: {n_in} x {h_in}, tile out: {n_out} x {h_out}' )
return (n_in, n_out, h_in, h_out, obj_expr, buffer_total/8)
else:
db = 2
parameters = pywrapcp.Solver.DefaultSolverParameters()
solver = pywrapcp.Solver("simple_CP", parameters)
            # optimized channel-first for the HWC kernel
            tile_n_in = solver.IntVar(1, max_tile_n_in, 'tile_n_in')
tile_n_out = solver.IntVar(1, max_tile_n_out, 'tile_n_out')
tile_h_in = solver.IntVar(min_tile_h_in, h_in, 'tile_h_in') #Temporal
tile_h_out = solver.IntVar(min_tile_h_out, h_out, 'tile_h_out') #Temporal
zero_variable = solver.IntVar(0, 0, 'zero variable')
h_out_intvar = solver.IntVar(min_tile_h_out,h_out,'h_out_intvar')
solver.Add(h_out_intvar == h_out)
            if ((fs - 1)*dilation+1) <= h_in:  # the receptive field fits within the temporal length, so temporal tiling is possible; otherwise h_in == tile_h_in
#Adding constraints for geometrical concerns.
solver.Add(0 == (tile_h_in - ((fs - 1)*dilation+1)) % s)
solver.Add(tile_h_out * s == (tile_h_in - (fs - 1)*dilation + (s - 1)))
# padding added
solver.Add(solver.Max((h_in - tile_h_in - (tile_h_in - (fs - 1)*dilation - p)), 0) % (tile_h_in - (fs - 1)*dilation + 1) + abs(solver.Min(
solver.Max((h_in - tile_h_in - (tile_h_in - (fs - 1)*dilation - p)), 0) % (tile_h_in - (fs - 1)*dilation), 1) - 1) * ((fs - 1)*dilation+1) >= ((fs - 1)*dilation+1))
solver.Add(h_in >= s*(tile_h_out*(h_out_intvar//tile_h_out)-1)-p+dilation*(fs-1)+1)
else:
solver.Add(h_in == tile_h_in )
solver.Add(h_out == tile_h_out )
solver.Add(tile_n_in == n_in)
constr_in = db * 8 * tile_n_in * tile_h_in #* tile_w_in
constr_out = db * 8 * tile_n_out * tile_h_out #* tile_w_out
constr_weight = db * 8 * tile_n_in * tile_n_out * fs
constr_bn = self.BitActivation * n_out * 2
            constraint_all = constr_in + constr_out + constr_weight + constr_bn + 20*32  # 20*32 bits: twenty 4-byte paddings added between the buffers
if BN == 0:
constraint_all -= constr_bn
solver.Add(constraint_all <= buffer_size * 8)
# objective
obj_expr = solver.IntVar(0, max_obj_value, "obj_expr")
n_tiles_h = solver.IntVar(1, h_in, "n_tiles_h")
leftover_tile_h_out = solver.IntVar(0, h_out, "leftover_tile_h_out")
n_tiles_nout = solver.IntVar(1, n_out, "n_tiles_nout")
leftover_tile_nout = solver.IntVar(0, n_out, "leftover_tile_nout")
solver.Add(n_tiles_h == (h_out + tile_h_out - 1) // tile_h_out)
solver.Add(leftover_tile_h_out == (h_out + zero_variable) % tile_h_out)
solver.Add(n_tiles_nout == (n_out + tile_n_out - 1) // tile_n_out)
solver.Add(leftover_tile_nout == (n_out + zero_variable) % tile_n_out)
## principal kernel
solver.Add(obj_expr == (64 * 10000 * tile_n_out
+ constraint_all
+ 64 * 1000000 * ((tile_h_out - 1) % 16) #Because of 8 cores -> 2 Pixels
+ 64 * 1000000 * ((tile_n_out - 1) % 4) #Because of 4x2 computation.
+ 64 * 10000 * (leftover_tile_nout)
+ 64 * 10000 * (leftover_tile_nout % 4)
+ 64 * 10000 * (leftover_tile_h_out % 16)))
objective = solver.Maximize(obj_expr, 1)
decision_builder = solver.Phase([tile_n_in, tile_n_out, tile_h_in, tile_h_out, n_tiles_h, leftover_tile_h_out, n_tiles_nout, leftover_tile_nout],
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE)
# Create a solution collector.
collector = solver.LastSolutionCollector()
# Add the decision variables.
collector.Add(tile_n_in)
collector.Add(tile_n_out)
collector.Add(tile_h_in)
collector.Add(tile_h_out)
collector.Add(n_tiles_h)
collector.Add(leftover_tile_h_out)
collector.Add(n_tiles_nout)
collector.Add(leftover_tile_nout)
collector.Add(obj_expr)
collector.Add(constraint_all)
# Add the objective.
collector.AddObjective(obj_expr)
solver.Solve(decision_builder, [objective, collector])
if collector.SolutionCount() > 0: #Calculating the theoretical cycles to compute this layer.
best_solution = collector.SolutionCount() - 1
tile_n_in = collector.Value(best_solution, tile_n_in)
tile_n_out = collector.Value(best_solution, tile_n_out)
tile_h_in = collector.Value(best_solution, tile_h_in)
tile_h_out = collector.Value(best_solution, tile_h_out)
n_tiles_h = collector.Value(best_solution, n_tiles_h)
leftover_tile_h_out = collector.Value(best_solution, leftover_tile_h_out)
n_tiles_nout = collector.Value(best_solution, n_tiles_nout)
leftover_tile_nout = collector.Value(best_solution, leftover_tile_nout)
obj_expr = collector.Value(best_solution, obj_expr)
memory = collector.Value(best_solution, constraint_all)
if tile_h_in >= h_in:
tile_h_in = h_in
tile_h_out = int((tile_h_in -(fs - 1)*dilation + (2*p) + (s - 1))/s)
MAC = tile_n_out*tile_n_in*tile_h_out*fs
n_4_2 = ((tile_h_out // 8 + ((tile_h_out % 8) > 0)) // 2)
n_4_1 = ((tile_h_out // 8 + ((tile_h_out % 8) > 0)) % 2)
cycles_chout_4_2 = (tile_n_out // 4) * (164 + fs * (29+14*(tile_n_in // 4) + 22 * (tile_n_in % 4)))
cycles_leftover_chout_4_2 = (tile_n_out%4)*(49+fs*(15+5*(tile_n_in//4)+8*(tile_n_in%4)))
cycles_chout_4_1 = (tile_n_out//4)*(90+fs*(15+9*(tile_n_in//4)+17*(tile_n_in%4)))
cycles_leftover_chout_4_1 = (tile_n_out%4)*(43+fs*(8+2*(tile_n_in//4)+7*(tile_n_in%4)))
instr = (82+(n_4_2*(62+8*fs+(110+cycles_chout_4_2+cycles_leftover_chout_4_2)) + n_4_1*(36+4*fs+(87+cycles_chout_4_1+cycles_leftover_chout_4_1))))
constr1 = (MAC * 1000) // instr
## left over h
MAC = leftover_tile_h_out*tile_n_in*tile_n_out*fs
n_4_2 = ((leftover_tile_h_out // 8 + ((leftover_tile_h_out % 8) > 0)) // 2)
n_4_1 = ((leftover_tile_h_out // 8 + ((leftover_tile_h_out % 8) > 0)) % 2)
cycles_chout_4_2 = (tile_n_out // 4) * (164 + fs * (29+14*(tile_n_in // 4) + 22 * (tile_n_in % 4)))
cycles_leftover_chout_4_2 = (tile_n_out%4)*(49+fs*(15+5*(tile_n_in//4)+8*(tile_n_in%4)))
cycles_chout_4_1 = (tile_n_out//4)*(90+fs*(15+9*(tile_n_in//4)+17*(tile_n_in%4)))
cycles_leftover_chout_4_1 = (tile_n_out%4)*(43+fs*(8+2*(tile_n_in//4)+7*(tile_n_in%4)))
instr = (82+(n_4_2*(62+8*fs+(110+cycles_chout_4_2+cycles_leftover_chout_4_2)) + n_4_1*(36+4*fs+(87+cycles_chout_4_1+cycles_leftover_chout_4_1))))
constr2 = (MAC * 1000) // instr
## left over n
MAC = leftover_tile_nout*tile_n_in*tile_h_out*fs
n_4_2 = ((tile_h_out // 8 + ((tile_h_out % 8) > 0)) // 2)
n_4_1 = ((tile_h_out // 8 + ((tile_h_out % 8) > 0)) % 2)
cycles_chout_4_2 = (leftover_tile_nout // 4) * (164 + fs * (29+14*(tile_n_in // 4) + 22 * (tile_n_in % 4)))
cycles_leftover_chout_4_2 = (leftover_tile_nout%4)*(49+fs*(15+5*(tile_n_in//4)+8*(tile_n_in%4)))
cycles_chout_4_1 = (leftover_tile_nout//4)*(90+fs*(15+9*(tile_n_in//4)+17*(tile_n_in%4)))
cycles_leftover_chout_4_1 = (leftover_tile_nout%4)*(43+fs*(8+2*(tile_n_in//4)+7*(tile_n_in%4)))
instr = (82+(n_4_2*(62+8*fs+(110+cycles_chout_4_2+cycles_leftover_chout_4_2)) + n_4_1*(36+4*fs+(87+cycles_chout_4_1+cycles_leftover_chout_4_1))))
constr3 = (MAC * 1000) // instr
## left over all
MAC = leftover_tile_nout*tile_n_in*leftover_tile_h_out*fs
n_4_2 = ((leftover_tile_h_out // 8 + ((leftover_tile_h_out % 8) > 0)) // 2)
n_4_1 = ((leftover_tile_h_out // 8 + ((leftover_tile_h_out % 8) > 0)) % 2)
cycles_chout_4_2 = (leftover_tile_nout // 4) * (164 + fs * (29+14*(tile_n_in // 4) + 22 * (tile_n_in % 4)))
cycles_leftover_chout_4_2 = (leftover_tile_nout%4)*(49+fs*(15+5*(tile_n_in//4)+8*(tile_n_in%4)))
cycles_chout_4_1 = (leftover_tile_nout//4)*(90+fs*(15+9*(tile_n_in//4)+17*(tile_n_in%4)))
cycles_leftover_chout_4_1 = (leftover_tile_nout%4)*(43+fs*(8+2*(tile_n_in//4)+7*(tile_n_in%4)))
instr = (82+(n_4_2*(62+8*fs+(110+cycles_chout_4_2+cycles_leftover_chout_4_2)) + n_4_1*(36+4*fs+(87+cycles_chout_4_1+cycles_leftover_chout_4_1))))
constr4 = (MAC * 1000) // instr
            constr = ((constr1*(n_tiles_h-(leftover_tile_h_out>0))*(n_tiles_nout-(leftover_tile_nout>0))
                       + constr2*(n_tiles_nout-(leftover_tile_nout>0))*(leftover_tile_h_out>0)
                       + constr3*(n_tiles_h-(leftover_tile_h_out>0))*(leftover_tile_nout>0)
                       + constr4)
                      // ((n_tiles_h-(leftover_tile_h_out>0))*(n_tiles_nout-(leftover_tile_nout>0))
                          + (n_tiles_nout-(leftover_tile_nout>0))*(leftover_tile_h_out>0)
                          + (n_tiles_h-(leftover_tile_h_out>0))*(leftover_tile_nout>0)
                          + 1*(leftover_tile_nout>0)*(leftover_tile_h_out>0)))
print(f'Conv indirect MAC/cycle simulated: {(constr/1000)}')
return (tile_n_in, tile_n_out, tile_h_in, tile_h_out, (constr/1000), memory/8)
return None
``` |
{
"source": "jmiiller/dns_shark",
"score": 2
} |
#### File: dns_shark/dns_shark/__main__.py
```python
from dns_shark.command_line_parsing import create_parser
import sys
from argparse import ArgumentParser, Namespace
from dns_shark.resource_record import ResourceRecord
from typing import List
from dns_shark.resolver_core import ResolverCore
from dns_shark.dns_resolver import Resolver
from dns_shark.errors.dns_format_error import DNSFormatError
from dns_shark.errors.dns_name_error import DNSNameError
from dns_shark.errors.dns_not_implemented_error import DNSNotImplementedError
from dns_shark.errors.dns_server_failure_error import DNSServerFailureError
from dns_shark.errors.dns_refused_error import DNSRefusedError
from dns_shark.errors.dns_no_matching_resource_record_error import DNSNoMatchingResourceRecordError
from dns_shark.errors.dns_zero_counter_error import DNSZeroCounterError
def main():
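    """Parse the command-line arguments, run the resolution, and exit."""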
parser: ArgumentParser = create_parser()
args: Namespace = parser.parse_args(sys.argv[1:])
dns_server_ip: str = args.dns_server_ip.pop()
domain_name: str = args.domain_name.pop()
main_helper(Resolver(), domain_name, dns_server_ip, args.ipv6 is not None, args.verbose is not None)
    sys.exit(0)
def main_helper(resolver: Resolver, domain_name: str, dns_server_ip: str, ipv6: bool, verbose: bool):
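    """Resolve domain_name via the given resolver and print either the answers or the DNS error that occurred."""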
try:
answers: List[ResourceRecord] = resolver.ask(domain_name, dns_server_ip, ipv6, verbose)
except (DNSFormatError,
DNSNameError,
DNSNotImplementedError,
DNSServerFailureError,
DNSRefusedError,
DNSNoMatchingResourceRecordError,
DNSZeroCounterError) as e:
print("")
print(e)
else:
ResolverCore.print_answers(domain_name, answers)
if __name__ == '__main__':
main()
```
#### File: test/test_resolver_core/test_matching_authoritative_response_ipv6.py
```python
from unittest.mock import Mock
import unittest
from dns_shark.resolver_core import ResolverCore
from dns_shark.resource_record import ResourceRecord
from typing import List
class MatchingAuthoritativeResponseTest(unittest.TestCase):
"""
Unit testing for resolver_core.
"""
@classmethod
def setUpClass(cls):
"""
Initialize test values used in the tests.
"""
cls.authoritative_response: bytes = bytes.fromhex('5c00840000010001000000000377777706676f6f676c6503636f6d000'
'01c0001c00c001c00010000012c00102607f8b0400a08000000000000'
'002004')
cls.mock_socket: Mock = Mock(**{'recv.return_value': cls.authoritative_response})
cls.mock_random: Mock = Mock(**{'randint.return_value': 0x5c00})
def test_matching_authoritative_response_ipv6(self):
"""
Test case for when the first response given by the queried dns name server is an authoritative response with
an ipv6 resource record type, which is what was asked for.
"""
resolver: ResolverCore = ResolverCore(self.mock_socket, False, "172.16.17.32", self.mock_random)
answers: List[ResourceRecord] = resolver.resolve_domain_name("www.google.com", "172.16.17.32", 28)
expected_answer: List[ResourceRecord] = [ResourceRecord('www.google.com', 28, 1, 300, 16, 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b')]
self.assertEqual(answers, expected_answer)
```
#### File: test/test_resolver_core/test_name_server_response_then_authoritative_response.py
```python
from unittest.mock import Mock
import unittest
from dns_shark.resolver_core import ResolverCore
from dns_shark.resource_record import ResourceRecord
from typing import List
class NameServerResponseThenAuthoritativeResponseTest(unittest.TestCase):
"""
Unit testing for resolver_core.
"""
@classmethod
def setUpClass(cls):
"""
Initialize test values used in the tests.
"""
cls.first_response: bytes = bytes.fromhex('0581800000010000000d000e0377777706676f6f676c6503636f6d0000010001c'
'017000200010002a300001401610c67746c642d73657276657273036e657400c0'
'17000200010002a30000040162c02ec017000200010002a30000040163c02ec01'
'7000200010002a30000040164c02ec017000200010002a30000040165c02ec017'
'000200010002a30000040166c02ec017000200010002a30000040167c02ec0170'
'00200010002a30000040168c02ec017000200010002a30000040169c02ec01700'
'0200010002a3000004016ac02ec017000200010002a3000004016bc02ec017000'
'200010002a3000004016cc02ec017000200010002a3000004016dc02ec02c0001'
'00010002a3000004c005061ec04c000100010002a3000004c0210e1ec05c00010'
'0010002a3000004c01a5c1ec06c000100010002a3000004c01f501ec07c000100'
'010002a3000004c00c5e1ec08c000100010002a3000004c023331ec09c0001000'
'10002a3000004c02a5d1ec0ac000100010002a3000004c036701ec0bc00010001'
'0002a3000004c02bac1ec0cc000100010002a3000004c0304f1ec0dc000100010'
'002a3000004c034b21ec0ec000100010002a3000004c029a21ec0fc0001000100'
'02a3000004c037531ec02c001c00010002a300001020010503a83e00000000000'
'000020030')
cls.second_response: bytes = bytes.fromhex('64eb800000010000000400080377777706676f6f676c6503636f6d0000010001'
'c010000200010002a3000006036e7332c010c010000200010002a3000006036e'
'7331c010c010000200010002a3000006036e7333c010c010000200010002a300'
'0006036e7334c010c02c001c00010002a3000010200148604802003400000000'
'0000000ac02c000100010002a3000004d8ef220ac03e001c00010002a3000010'
'2001486048020032000000000000000ac03e000100010002a3000004d8ef200a'
'c050001c00010002a30000102001486048020036000000000000000ac0500001'
'00010002a3000004d8ef240ac062001c00010002a30000102001486048020038'
'000000000000000ac062000100010002a3000004d8ef260a')
cls.authoritative_response: bytes = bytes.fromhex('0a7b840000010001000000000377777706676f6f676c6503636f6d000'
'0010001c00c000100010000012c0004acd90ec4')
cls.mock_socket: Mock = Mock(**{'recv.side_effect': [cls.first_response,
cls.second_response,
cls.authoritative_response]})
cls.mock_random: Mock = Mock(**{'randint.side_effect': [0x0581, 0x64eb, 0x0a7b]})
def test_name_server_response_then_authoritative(self):
"""
        Test case for when the first two responses are non-authoritative and the final one is
        authoritative and contains a matching resource record.
        The first two responses both contain the IP address of a name server, so we do not need
        to do a separate lookup for the name server's IP address.
"""
resolver: ResolverCore = ResolverCore(self.mock_socket, False, "192.168.127.12", self.mock_random)
answers: List[ResourceRecord] = resolver.resolve_domain_name("www.google.com", "192.168.127.12", 1)
expected_answer: List[ResourceRecord] = [ResourceRecord('www.google.com', 1, 1, 300, 4, '192.168.3.11')]
self.assertEqual(answers, expected_answer)
```
#### File: test/test_resolver_core/test_zero_counter_error.py
```python
from unittest.mock import Mock
import unittest
from dns_shark.resolver_core import ResolverCore
from dns_shark.errors.dns_zero_counter_error import DNSZeroCounterError
class DNSZeroCounterErrorTest(unittest.TestCase):
"""
Unit testing for resolver_core.
"""
@classmethod
def setUp(cls):
"""
Initialize test values used in the tests.
"""
cls.first_response: bytes = bytes.fromhex('150e800000010000000d000c0377777706676f6f676c6503636f6d00001c0001'
'c017000200010002a300001401650c67746c642d73657276657273036e657400'
'c017000200010002a30000040162c02ec017000200010002a3000004016ac02e'
'c017000200010002a3000004016dc02ec017000200010002a30000040169c02e'
'c017000200010002a30000040166c02ec017000200010002a30000040161c02e'
'c017000200010002a30000040167c02ec017000200010002a30000040168c02e'
'c017000200010002a3000004016cc02ec017000200010002a3000004016bc02e'
'c017000200010002a30000040163c02ec017000200010002a30000040164c02e'
'c02c000100010002a3000004c00c5e1ec02c001c00010002a300001020010502'
'1ca100000000000000000030c04c000100010002a3000004c0210e1ec04c001c'
'00010002a300001020010503231d00000000000000020030c05c000100010002'
'a3000004c0304f1ec05c001c00010002a3000010200105027094000000000000'
'00000030c06c000100010002a3000004c037531ec06c001c00010002a3000010'
'20010501b1f900000000000000000030c07c000100010002a3000004c02bac1e'
'c07c001c00010002a30000102001050339c100000000000000000030c08c0001'
'00010002a3000004c023331ec09c000100010002a3000004c005061e')
cls.second_response: bytes = bytes.fromhex('e13b800000010000000400080377777706676f6f676c6503636f6d00001c000'
'1c010000200010002a3000006036e7332c010c010000200010002a300000603'
'6e7331c010c010000200010002a3000006036e7333c010c010000200010002a'
'3000006036e7334c010c02c001c00010002a300001020014860480200340000'
'00000000000ac02c000100010002a3000004d8ef220ac03e001c00010002a30'
'000102001486048020032000000000000000ac03e000100010002a3000004d8'
'ef200ac050001c00010002a30000102001486048020036000000000000000ac'
'050000100010002a3000004d8ef240ac062001c00010002a300001020014860'
'48020038000000000000000ac062000100010002a3000004d8ef260a')
cls.third_response: bytes = bytes.fromhex('c98f840000010001000000000377777706676f6f676c6503636f6d00'
'001c0001c00c001c00010000012c00102607f8b0400a080300000000'
'00002004')
cls.mock_socket: Mock = Mock(**{'recv.side_effect': [cls.first_response,
cls.second_response,
cls.third_response]})
cls.mock_random: Mock = Mock(**{'randint.side_effect': [0x150e, 0xe13b, 0xc98f]})
def test_zero_counter_error_counter_zero(self):
"""
        Test case for when the counter is set to zero and is therefore exhausted immediately.
"""
resolver: ResolverCore = ResolverCore(self.mock_socket, False, "1.2.3.4", self.mock_random, 0)
self.assertRaises(DNSZeroCounterError, resolver.resolve_domain_name, "www.google.ca", "1.2.3.4", 1)
def test_zero_counter_error_counter_one(self):
"""
        Test case for when the counter is set to one and exhausted.
"""
resolver: ResolverCore = ResolverCore(self.mock_socket, False, "1.2.3.4", self.mock_random, 1)
self.assertRaises(DNSZeroCounterError, resolver.resolve_domain_name, "www.google.ca", "192.168.3.11", 1)
def test_zero_counter_error_counter_two(self):
"""
Test case for when the counter is set to two and exhausted.
"""
resolver: ResolverCore = ResolverCore(self.mock_socket, False, "1.2.3.4", self.mock_random, 2)
self.assertRaises(DNSZeroCounterError, resolver.resolve_domain_name, "www.google.ca", "192.168.3.11", 1)
``` |
{
"source": "jmike1211/bitfinex-api-py",
"score": 3
} |
#### File: jmike1211/bitfinex-api-py/bfxmongo.py
```python
import pymongo
import os
import sys
from config import Config
class useMongo():
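    """Thin convenience wrapper around pymongo CRUD operations on the 'bfxtest' database."""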
def __init__(self):
self.client = pymongo.MongoClient(Config.config()["mongoConnect"])
self.db = self.client['bfxtest']
def mongofindone(self, token={}, collectionName="account"):
collection = self.db[collectionName]
result = collection.find_one(token)
return result
def mongofindall(self, token, collectionName="account"):
collection = self.db[collectionName]
result = collection.find(token)
return result
def mongoinsertone(self, token, collectionName="account"):
collection = self.db[collectionName]
result = collection.insert_one(token)
return result
def mongodeleteone(self, token, collectionName="account"):
collection = self.db[collectionName]
result = collection.delete_one(token)
return result
def mongoupdateone(self, origintoken, changetoken, collectionName="account"):
collection = self.db[collectionName]
modify = collection.find_one(origintoken)
print(modify)
#modify[changename] = changetoken
result = collection.update_one(origintoken, {'$set': changetoken})
return result
def mongoupsertone(self, origintoken, changetoken, collectionName="account"):
collection = self.db[collectionName]
modify = collection.find_one(origintoken)
print(modify)
#modify[changename] = changetoken
result = collection.update_one(origintoken, {'$set': changetoken},upsert=True)
return result
#result = useMongo().mongodeleteone({"id" : "20170101"})
#print(result)
"""
result = useMongo().mongofindone({},"frrrate")
print(result)
if result == None:
print("aaaaa")
else:
print(result["frr"])
mongoResult = {}
mongoResult["frr"] = round(0.000325613,5)
mongoResult["hask"] = round(0.000842145613,5)
mongoResult["lprice"] = round(0.00017313,5)
result = useMongo().mongoupdateone({},mongoResult,"frrrate")
print(result)
print(result.matched_count, result.modified_count)
"""
```
#### File: jmike1211/bitfinex-api-py/calfundingrate.py
```python
from bfxmongo import useMongo
class calRate:
    @staticmethod
    def fundingRate():
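        """Pick a funding offer rate and period from the FRR, daily high and last price stored in Mongo.

        The numeric thresholds are the strategy's own tuning constants (the inline percentage
        comments appear to be rough annualized equivalents); if no document can be read, a
        conservative default rate and a 2-day period are returned.
        """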
try:
fundingRate = useMongo().mongofindone({},"frrrate")
print(fundingRate)
frrRate = fundingRate["frr"]
days = 2
if fundingRate["frr"] > 0.00055: # 20%
if fundingRate["dayhigh"] >= 0.001: #40%
frrRate = (fundingRate["frr"] + fundingRate["dayhigh"])/2
elif fundingRate["frr"] >= fundingRate["dayhigh"]: #40%
frrRate = (fundingRate["lprice"] + fundingRate["dayhigh"])/2
days = 14
else:
if fundingRate["lprice"] < 0.00019: #7%
frrRate = 0.00025
elif fundingRate["lprice"] > 0.0004: #15%
frrRate = (fundingRate["frr"] + fundingRate["lprice"])/2
elif fundingRate["frr"] >= fundingRate["dayhigh"]:
frrRate = (fundingRate["dayhigh"] + fundingRate["lprice"])/2
        except Exception:  # no stored rate or malformed document; fall back to a default offer
frrRate = 0.00031123 #10%
days = 2
return frrRate, days
#a, b = calRate.fundingRate()
``` |
{
"source": "jmikedupont2/aws3transcribe",
"score": 3
} |
#### File: jmikedupont2/aws3transcribe/create_buckets.py
```python
import logging
import boto3
from botocore.exceptions import ClientError
from keys import access_key, secret_access_key
region = 'ap-southeast-1'
bucket_name = ''
def create_bucket(bucket_name, region=region):
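    """Prompt for a bucket name and create it on S3.

    When region is None the bucket is created in the account's default region, otherwise a
    LocationConstraint for the given region is used. Returns True on success and False when
    boto3 raises a ClientError.
    """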
try:
if region is None:
s3_client = boto3.client('s3', aws_access_key_id = access_key, aws_secret_access_key = secret_access_key)
bucket_name = input("Enter a bucket name of your choice. Please use only lowercase letters and numbers. ")
s3_client.create_bucket(Bucket=bucket_name)
print("SUCCESS !!! Go check your S3 page.\n")
else:
s3_client = boto3.client('s3', aws_access_key_id = access_key, aws_secret_access_key = secret_access_key, region_name=region)
location = {'LocationConstraint': region}
bucket_name = input("Enter a bucket name of your choice. Please use only lowercase letters and numbers. ")
s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=location)
print("SUCCESS !!! Go check your S3 page.\n")
except ClientError as e:
logging.error(e)
print("Something happened, please try using a different name for bucket.\n")
return False
return True
s3_client = boto3.client('s3', aws_access_key_id = access_key, aws_secret_access_key = secret_access_key)
response= s3_client.list_buckets()
print("Found "+ str(len(response["Buckets"])) + " bucket(s) \n" )
# FETCH ALL AVAILABLE BUCKETS UNDER YOUR S3 INSTANCE
for x in range(len(response["Buckets"])):
print(x," ",response["Buckets"][x]["Name"] ) #print bucket names
create_bucket(bucket_name)
``` |